i915_gem.c

/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

#define RQ_BUG_ON(expr)
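
/* RQ_BUG_ON() is intentionally a no-op here: the request-state assertions it
 * guards are compiled out in normal builds. To chase request lifetime bugs it
 * can be redefined locally, e.g. (illustrative sketch, not part of the
 * original source):
 *
 *	#define RQ_BUG_ON(expr) BUG_ON(expr)
 */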

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static void
i915_gem_object_retire__write(struct drm_i915_gem_object *obj);
static void
i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring);

static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

#define EXIT_COND (!i915_reset_in_progress(error) || \
		   i915_terminally_wedged(error))
	if (EXIT_COND)
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       EXIT_COND,
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	}
#undef EXIT_COND

	return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_gtt *ggtt = &dev_priv->gtt;
	struct i915_vma *vma;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(vma, &ggtt->base.active_list, mm_list)
		if (vma->pin_count)
			pinned += vma->node.size;
	list_for_each_entry(vma, &ggtt->base.inactive_list, mm_list)
		if (vma->pin_count)
			pinned += vma->node.size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->gtt.base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}
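
/* Userspace reaches this via DRM_IOCTL_I915_GEM_GET_APERTURE. A minimal
 * sketch of a libdrm caller (illustrative only, not part of this file):
 *
 *	struct drm_i915_gem_get_aperture aper = {};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aper) == 0)
 *		printf("GTT: %llu total, %llu available\n",
 *		       aper.aper_size, aper.aper_available_size);
 */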

static int
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
	char *vaddr = obj->phys_handle->vaddr;
	struct sg_table *st;
	struct scatterlist *sg;
	int i;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		page_cache_release(page);
		vaddr += PAGE_SIZE;
	}

	i915_gem_chipset_flush(obj->base.dev);

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = obj->phys_handle->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->pages = st;
	return 0;
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
{
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		WARN_ON(ret != -EIO);
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	if (obj->dirty) {
		struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			page_cache_release(page);
			vaddr += PAGE_SIZE;
		}
		obj->dirty = 0;
	}

	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	drm_pci_free(obj->base.dev, obj->phys_handle);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};

static int
drop_pages(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma, *next;
	int ret;

	drm_gem_object_reference(&obj->base);
	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
		if (i915_vma_unbind(vma))
			break;

	ret = i915_gem_object_put_pages(obj);
	drm_gem_object_unreference(&obj->base);

	return ret;
}

int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
			    int align)
{
	drm_dma_handle_t *phys;
	int ret;

	if (obj->phys_handle) {
		if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
			return -EBUSY;

		return 0;
	}

	if (obj->madv != I915_MADV_WILLNEED)
		return -EFAULT;

	if (obj->base.filp == NULL)
		return -EINVAL;

	ret = drop_pages(obj);
	if (ret)
		return ret;

	/* create a new object */
	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
	if (!phys)
		return -ENOMEM;

	obj->phys_handle = phys;
	obj->ops = &i915_gem_phys_ops;

	return i915_gem_object_get_pages(obj);
}

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_device *dev = obj->base.dev;
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = to_user_ptr(args->data_ptr);
	int ret = 0;

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten) {
			ret = -EFAULT;
			goto out;
		}
	}

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(dev);

out:
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return ret;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	kmem_cache_free(dev_priv->objects, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}
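
/* Userspace reaches i915_gem_create() through DRM_IOCTL_I915_GEM_CREATE.
 * A minimal sketch of a caller (illustrative only; use_bo_handle() is a
 * hypothetical helper, not part of this file):
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
 *		use_bo_handle(create.handle);
 *
 * The requested size is rounded up to a whole page internally.
 */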

static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}
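
/* Worked example of the swizzle above (descriptive only): with bit-17
 * swizzling active, each 64-byte cacheline is swapped with its neighbour,
 * i.e. offset ^ 64 flips bit 6 of the offset. Copying bytes 0..63 of the
 * unswizzled view therefore touches bytes 64..127 of the backing page, and
 * vice versa. The loop clamps this_length to the current cacheline so that
 * every copied chunk stays within a single (swizzled) cacheline.
 */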

/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    int *needs_clflush)
{
	int ret;

	*needs_clflush = 0;

	if (!obj->base.filp)
		return -EINVAL;

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
							obj->cache_level);
		ret = i915_gem_object_wait_rendering(obj, true);
		if (ret)
			return ret;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	return ret;
}

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}
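
/* Design note: rounding the flush out to 128 bytes covers both 64-byte
 * cachelines of a swizzled pair, so the flush reaches the data regardless
 * of whether the caller computed the swizzled or the unswizzled address.
 */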

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int prefaulted = 0;
	int needs_clflush = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	if (ret)
		return ret;

	offset = args->offset;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		mutex_unlock(&dev->struct_mutex);

		if (likely(!i915.prefault_disable) && !prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	return ret;
}
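
/* Design note: the pread loop first tries the atomic kmap_atomic() fastpath
 * with struct_mutex held; only if that faults does it drop the lock,
 * prefault the user buffer once, and retry the page with the sleeping
 * kmap() slow path before reacquiring the lock and moving on.
 */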

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */
static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force *)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length, ret;

	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
	if (ret)
		goto out;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;

	intel_fb_obj_invalidate(obj, ORIGIN_GTT);

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->gtt.mappable, page_base,
				    page_offset, user_data, page_length)) {
			ret = -EFAULT;
			goto out_flush;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_flush:
	intel_fb_obj_flush(obj, false, ORIGIN_GTT);
out_unpin:
	i915_gem_object_ggtt_unpin(obj);
out:
	return ret;
}
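
/* Design note: the GTT fastpath maps the aperture page write-combined
 * (io_mapping_map_atomic_wc) and copies with a non-temporal variant, so the
 * data bypasses the CPU cache entirely; that is why no clflush is needed
 * here, unlike the shmem paths below. A fault inside the atomic copy aborts
 * with -EFAULT and the ioctl falls back to the shmem pwrite path.
 */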

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush_after is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
					user_data, page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourself into the gtt
		 * write domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		needs_clflush_after = cpu_write_needs_clflush(obj);
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;
	}
	/* Same trick applies to invalidate partially written cachelines read
	 * before writing. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
		needs_clflush_before =
			!cpu_cache_is_coherent(dev, obj->cache_level);

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);

	i915_gem_object_pin_pages(obj);

	offset = args->offset;
	obj->dirty = 1;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);
		int partial_cacheline_write;

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire written range. */
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		mutex_unlock(&dev->struct_mutex);
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	if (hit_slowpath) {
		/*
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved
		 * out of the cpu write domain while we've dropped the lock.
		 */
		if (!needs_clflush_after &&
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			if (i915_gem_clflush_object(obj, obj->pin_display))
				i915_gem_chipset_flush(dev);
		}
	}

	if (needs_clflush_after)
		i915_gem_chipset_flush(dev);

	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return ret;
}
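
/* Design note: the clflush decisions are made up front. needs_clflush_after
 * is set when the GPU will read the data through an incoherent path, so
 * dirty cachelines must be written back after the copy; needs_clflush_before
 * covers partially written cachelines that could otherwise mix stale cached
 * data with the new bytes. On LLC platforms both are normally skipped, with
 * the exception of display-pinned objects, since scanout bypasses the LLC.
 */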

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
						   args->size);
		if (ret)
			return -EFAULT;
	}

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto put_rpm;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->tiling_mode == I915_TILING_NONE &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
	    cpu_write_needs_clflush(obj)) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
	}

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
		else
			ret = i915_gem_shmem_pwrite(dev, obj, args, file);
	}

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
put_rpm:
	intel_runtime_pm_put(dev_priv);

	return ret;
}
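
/* Path selection above, in order of preference: the uncached GTT fastpath
 * for untiled buffers that would need a clflush anyway, then the
 * phys-object copy for objects with a phys_handle, and finally the generic
 * shmem path. -EFAULT/-ENOSPC from the GTT path mean "try the fallback",
 * not a hard error.
 */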

int
i915_gem_check_wedge(struct i915_gpu_error *error,
		     bool interruptible)
{
	if (i915_reset_in_progress(error)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these. */
		if (!interruptible)
			return -EIO;

		/* Recovery complete, but the reset failed ... */
		if (i915_terminally_wedged(error))
			return -EIO;

		/*
		 * Check if GPU Reset is in progress - we need intel_ring_begin
		 * to work properly to reinit the hw state while the gpu is
		 * still marked as reset-in-progress. Handle this with a flag.
		 */
		if (!error->reload_in_reset)
			return -EAGAIN;
	}

	return 0;
}
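
/* Summary of the wedge semantics: -EIO means the GPU is terminally gone (or
 * the caller cannot cope with -EAGAIN); -EAGAIN asks the caller to back off,
 * drop struct_mutex and retry once the reset completes. reload_in_reset
 * marks the window in which the reset handler itself must still be allowed
 * to submit ring commands to reinitialize the hardware.
 */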

static void fake_irq(unsigned long data)
{
	wake_up_process((struct task_struct *)data);
}

static bool missed_irq(struct drm_i915_private *dev_priv,
		       struct intel_engine_cs *ring)
{
	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}

static int __i915_spin_request(struct drm_i915_gem_request *req)
{
	unsigned long timeout;

	if (i915_gem_request_get_ring(req)->irq_refcount)
		return -EBUSY;

	timeout = jiffies + 1;
	while (!need_resched()) {
		if (i915_gem_request_completed(req, true))
			return 0;

		if (time_after_eq(jiffies, timeout))
			break;

		cpu_relax_lowlatency();
	}
	if (i915_gem_request_completed(req, false))
		return 0;

	return -EAGAIN;
}
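
/* Design note: __i915_spin_request() busy-waits for at most one jiffy
 * before the caller falls back to sleeping on the ring's irq_queue. For
 * requests that complete in microseconds this avoids the cost of enabling
 * the user interrupt and a double context switch; need_resched() bounds the
 * latency impact on other runnable tasks.
 */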

/**
 * __i915_wait_request - wait until execution of request has finished
 * @req: the request to wait upon
 * @reset_counter: reset sequence associated with the given request
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 * @rps: RPS client to credit for frequency boosting while waiting
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the request was found within the allotted time. Else returns the
 * errno with remaining time filled in timeout argument.
 */
int __i915_wait_request(struct drm_i915_gem_request *req,
			unsigned reset_counter,
			bool interruptible,
			s64 *timeout,
			struct intel_rps_client *rps)
{
	struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const bool irq_test_in_progress =
		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
	DEFINE_WAIT(wait);
	unsigned long timeout_expire;
	s64 before, now;
	int ret;

	WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");

	if (list_empty(&req->list))
		return 0;

	if (i915_gem_request_completed(req, true))
		return 0;

	timeout_expire = timeout ?
		jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0;

	if (INTEL_INFO(dev_priv)->gen >= 6)
		gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);

	/* Record current time in case interrupted by signal, or wedged */
	trace_i915_gem_request_wait_begin(req);
	before = ktime_get_raw_ns();

	/* Optimistic spin for the next jiffie before touching IRQs */
	ret = __i915_spin_request(req);
	if (ret == 0)
		goto out;

	if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring))) {
		ret = -ENODEV;
		goto out;
	}

	for (;;) {
		struct timer_list timer;

		prepare_to_wait(&ring->irq_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);

		/* We need to check whether any gpu reset happened in between
		 * the caller grabbing the seqno and now ... */
		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
			/* ... but upgrade the -EAGAIN to an -EIO if the gpu
			 * is truly gone. */
			ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
			if (ret == 0)
				ret = -EAGAIN;
			break;
		}

		if (i915_gem_request_completed(req, false)) {
			ret = 0;
			break;
		}

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (timeout && time_after_eq(jiffies, timeout_expire)) {
			ret = -ETIME;
			break;
		}

		timer.function = NULL;
		if (timeout || missed_irq(dev_priv, ring)) {
			unsigned long expire;

			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
			expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
			mod_timer(&timer, expire);
		}

		io_schedule();

		if (timer.function) {
			del_singleshot_timer_sync(&timer);
			destroy_timer_on_stack(&timer);
		}
	}
	if (!irq_test_in_progress)
		ring->irq_put(ring);

	finish_wait(&ring->irq_queue, &wait);

out:
	now = ktime_get_raw_ns();
	trace_i915_gem_request_wait_end(req);

	if (timeout) {
		s64 tres = *timeout - (now - before);

		*timeout = tres < 0 ? 0 : tres;

		/*
		 * Apparently ktime isn't accurate enough and occasionally has a
		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
		 * things up to make the test happy. We allow up to 1 jiffy.
		 *
  1074. * This is a regrssion from the timespec->ktime conversion.
  1075. */
  1076. if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
  1077. *timeout = 0;
  1078. }
  1079. return ret;
  1080. }
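/*
 * Example (editor's illustrative sketch, not part of the driver): a
 * hypothetical caller performing a lockless wait with a 100ms budget. Note
 * how the reset counter is sampled before the wait, as the kerneldoc above
 * requires; wait_for_request_100ms() is a made-up name.
 */
#if 0
static int wait_for_request_100ms(struct drm_i915_gem_request *req)
{
	struct drm_i915_private *dev_priv = req->i915;
	unsigned reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	s64 budget_ns = 100 * NSEC_PER_MSEC;

	/* Interruptible wait, no RPS client to charge for the boost. */
	return __i915_wait_request(req, reset_counter, true, &budget_ns, NULL);
}
#endif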
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
				   struct drm_file *file)
{
	struct drm_i915_private *dev_private;
	struct drm_i915_file_private *file_priv;

	WARN_ON(!req || !file || req->file_priv);

	if (!req || !file)
		return -EINVAL;

	if (req->file_priv)
		return -EINVAL;

	dev_private = req->ring->dev->dev_private;
	file_priv = file->driver_priv;

	spin_lock(&file_priv->mm.lock);
	req->file_priv = file_priv;
	list_add_tail(&req->client_list, &file_priv->mm.request_list);
	spin_unlock(&file_priv->mm.lock);

	req->pid = get_pid(task_pid(current));

	return 0;
}
static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
	struct drm_i915_file_private *file_priv = request->file_priv;

	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	list_del(&request->client_list);
	request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);

	put_pid(request->pid);
	request->pid = NULL;
}
static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
	trace_i915_gem_request_retire(request);

	/* We know the GPU must have read the request to have
	 * sent us the seqno + interrupt, so use the position
	 * of the tail of the request to update the last known position
	 * of the GPU head.
	 *
	 * Note this requires that we are always called in request
	 * completion order.
	 */
	request->ringbuf->last_retired_head = request->postfix;

	list_del_init(&request->list);
	i915_gem_request_remove_from_client(request);

	i915_gem_request_unreference(request);
}
static void
__i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->ring;
	struct drm_i915_gem_request *tmp;

	lockdep_assert_held(&engine->dev->struct_mutex);

	if (list_empty(&req->list))
		return;

	do {
		tmp = list_first_entry(&engine->request_list,
				       typeof(*tmp), list);

		i915_gem_request_retire(tmp);
	} while (tmp != req);

	WARN_ON(i915_verify_lists(engine->dev));
}
/**
 * Waits for a request to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_request(struct drm_i915_gem_request *req)
{
	struct drm_device *dev;
	struct drm_i915_private *dev_priv;
	bool interruptible;
	int ret;

	BUG_ON(req == NULL);

	dev = req->ring->dev;
	dev_priv = dev->dev_private;
	interruptible = dev_priv->mm.interruptible;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
	if (ret)
		return ret;

	ret = __i915_wait_request(req,
				  atomic_read(&dev_priv->gpu_error.reset_counter),
				  interruptible, NULL, NULL);
	if (ret)
		return ret;

	__i915_gem_request_retire__upto(req);
	return 0;
}
/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	int ret, i;

	if (!obj->active)
		return 0;

	if (readonly) {
		if (obj->last_write_req != NULL) {
			ret = i915_wait_request(obj->last_write_req);
			if (ret)
				return ret;

			i = obj->last_write_req->ring->id;
			if (obj->last_read_req[i] == obj->last_write_req)
				i915_gem_object_retire__read(obj, i);
			else
				i915_gem_object_retire__write(obj);
		}
	} else {
		for (i = 0; i < I915_NUM_RINGS; i++) {
			if (obj->last_read_req[i] == NULL)
				continue;

			ret = i915_wait_request(obj->last_read_req[i]);
			if (ret)
				return ret;

			i915_gem_object_retire__read(obj, i);
		}
		RQ_BUG_ON(obj->active);
	}

	return 0;
}
static void
i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
			       struct drm_i915_gem_request *req)
{
	int ring = req->ring->id;

	if (obj->last_read_req[ring] == req)
		i915_gem_object_retire__read(obj, ring);
	else if (obj->last_write_req == req)
		i915_gem_object_retire__write(obj);

	__i915_gem_request_retire__upto(req);
}
/* A nonblocking variant of the above wait. This is a highly dangerous routine
 * as the object state may change during this call.
 */
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
					    struct intel_rps_client *rps,
					    bool readonly)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *requests[I915_NUM_RINGS];
	unsigned reset_counter;
	int ret, i, n = 0;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(!dev_priv->mm.interruptible);

	if (!obj->active)
		return 0;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
	if (ret)
		return ret;

	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);

	if (readonly) {
		struct drm_i915_gem_request *req;

		req = obj->last_write_req;
		if (req == NULL)
			return 0;

		requests[n++] = i915_gem_request_reference(req);
	} else {
		for (i = 0; i < I915_NUM_RINGS; i++) {
			struct drm_i915_gem_request *req;

			req = obj->last_read_req[i];
			if (req == NULL)
				continue;

			requests[n++] = i915_gem_request_reference(req);
		}
	}

	mutex_unlock(&dev->struct_mutex);
	for (i = 0; ret == 0 && i < n; i++)
		ret = __i915_wait_request(requests[i], reset_counter, true,
					  NULL, rps);
	mutex_lock(&dev->struct_mutex);

	for (i = 0; i < n; i++) {
		if (ret == 0)
			i915_gem_object_retire_request(obj, requests[i]);
		i915_gem_request_unreference(requests[i]);
	}

	return ret;
}
static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
	struct drm_i915_file_private *fpriv = file->driver_priv;

	return &fpriv->rps;
}
/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
	ret = i915_gem_object_wait_rendering__nonblocking(obj,
							  to_rps_client(file),
							  !write_domain);
	if (ret)
		goto unref;

	if (read_domains & I915_GEM_DOMAIN_GTT)
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
	else
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);

	if (write_domain != 0)
		intel_fb_obj_invalidate(obj,
					write_domain == I915_GEM_DOMAIN_GTT ?
					ORIGIN_GTT : ORIGIN_CPU);

unref:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
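/*
 * Example (editor's illustrative sketch of the userspace side, not driver
 * code): moving a buffer into the GTT domain before accessing it through a
 * GTT mapping. The fd and handle are assumed to come from a prior GEM
 * create call; set_to_gtt_domain() is a made-up name.
 */
#if 0
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int set_to_gtt_domain(int fd, uint32_t handle, int writing)
{
	struct drm_i915_gem_set_domain sd = {
		.handle = handle,
		.read_domains = I915_GEM_DOMAIN_GTT,
		/* A write domain implies the same read domain, see above. */
		.write_domain = writing ? I915_GEM_DOMAIN_GTT : 0,
	};

	return ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
}
#endif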
/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_display)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 *
 * IMPORTANT:
 *
 * DRM driver writers who look at this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on; hiding
 * the mmap call in a driver private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !cpu_has_pat)
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->filp) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

	addr = vm_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		down_write(&mm->mmap_sem);
		vma = find_vma(mm, addr);
		if (vma)
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		up_write(&mm->mmap_sem);
	}
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
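/*
 * Example (editor's illustrative sketch of the userspace side): mapping a
 * GEM object through this ioctl with a write-combining mapping. fd, handle
 * and size are assumed to describe an existing object; cpu_mmap_wc() is a
 * made-up name.
 */
#if 0
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static void *cpu_mmap_wc(int fd, uint32_t handle, uint64_t size)
{
	struct drm_i915_gem_mmap arg = {
		.handle = handle,
		.size = size,
		.flags = I915_MMAP_WC,	/* rejected with -ENODEV without PAT */
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg))
		return NULL;

	return (void *)(uintptr_t)arg.addr_ptr;
}
#endif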
/**
 * i915_gem_fault - fault a page into the GTT
 * @vma: VMA in question
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace. The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room. So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_ggtt_view view = i915_ggtt_view_normal;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	intel_runtime_pm_get(dev_priv);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Try to flush the object off the GPU first without holding the lock.
	 * Upon reacquiring the lock, we will perform our sanity checks and then
	 * repeat the flush holding the lock in the normal manner to catch cases
	 * where we are gazumped.
	 */
	ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
	if (ret)
		goto unlock;

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
		ret = -EFAULT;
		goto unlock;
	}

	/* Use a partial view if the object is bigger than the aperture. */
	if (obj->base.size >= dev_priv->gtt.mappable_end &&
	    obj->tiling_mode == I915_TILING_NONE) {
		static const unsigned int chunk_size = 256; /* 1 MiB */

		memset(&view, 0, sizeof(view));
		view.type = I915_GGTT_VIEW_PARTIAL;
		view.params.partial.offset = rounddown(page_offset, chunk_size);
		view.params.partial.size =
			min_t(unsigned int,
			      chunk_size,
			      (vma->vm_end - vma->vm_start)/PAGE_SIZE -
			      view.params.partial.offset);
	}

	/* Now pin it into the GTT if needed */
	ret = i915_gem_object_ggtt_pin(obj, &view, 0, PIN_MAPPABLE);
	if (ret)
		goto unlock;

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto unpin;

	ret = i915_gem_object_get_fence(obj);
	if (ret)
		goto unpin;

	/* Finally, remap it using the new GTT offset */
	pfn = dev_priv->gtt.mappable_base +
		i915_gem_obj_ggtt_offset_view(obj, &view);
	pfn >>= PAGE_SHIFT;

	if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
		/* Overriding existing pages in partial view does not cause
		 * us any trouble as TLBs are still valid because the fault
		 * is due to userspace losing part of the mapping or never
		 * having accessed it before (at this partial view's range).
		 */
		unsigned long base = vma->vm_start +
				     (view.params.partial.offset << PAGE_SHIFT);
		unsigned int i;

		for (i = 0; i < view.params.partial.size; i++) {
			ret = vm_insert_pfn(vma, base + i * PAGE_SIZE, pfn + i);
			if (ret)
				break;
		}

		obj->fault_mappable = true;
	} else {
		if (!obj->fault_mappable) {
			unsigned long size = min_t(unsigned long,
						   vma->vm_end - vma->vm_start,
						   obj->base.size);
			int i;

			for (i = 0; i < size >> PAGE_SHIFT; i++) {
				ret = vm_insert_pfn(vma,
						    (unsigned long)vma->vm_start + i * PAGE_SIZE,
						    pfn + i);
				if (ret)
					break;
			}

			obj->fault_mappable = true;
		} else
			ret = vm_insert_pfn(vma,
					    (unsigned long)vmf->virtual_address,
					    pfn + page_offset);
	}
unpin:
	i915_gem_object_ggtt_unpin_view(obj, &view);
unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EIO:
		/*
		 * We eat errors when the gpu is terminally wedged to avoid
		 * userspace unduly crashing (gl has no provisions for mmaps to
		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
		 * and so needs to be reported.
		 */
		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
			ret = VM_FAULT_SIGBUS;
			break;
		}
	case -EAGAIN:
		/*
		 * EAGAIN means the gpu is hung and we'll wait for the error
		 * handler to reset everything when re-faulting in
		 * i915_mutex_lock_interruptible.
		 */
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		ret = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		ret = VM_FAULT_OOM;
		break;
	case -ENOSPC:
	case -EFAULT:
		ret = VM_FAULT_SIGBUS;
		break;
	default:
		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
		ret = VM_FAULT_SIGBUS;
		break;
	}

	intel_runtime_pm_put(dev_priv);
	return ret;
}
/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	if (!obj->fault_mappable)
		return;

	drm_vma_node_unmap(&obj->base.vma_node,
			   obj->base.dev->anon_inode->i_mapping);
	obj->fault_mappable = false;
}

void
i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		i915_gem_release_mmap(obj);
}
uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
	uint32_t gtt_size;

	if (INTEL_INFO(dev)->gen >= 4 ||
	    tiling_mode == I915_TILING_NONE)
		return size;

	/* Previous chips need a power-of-two fence region when tiling */
	if (INTEL_INFO(dev)->gen == 3)
		gtt_size = 1024*1024;
	else
		gtt_size = 512*1024;

	while (gtt_size < size)
		gtt_size <<= 1;

	return gtt_size;
}
/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @dev: DRM device
 * @size: size of the object
 * @tiling_mode: tiling mode of the object
 * @fenced: whether the object will be accessed through a fence register
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping.
 */
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
			   int tiling_mode, bool fenced)
{
	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
	    tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	return i915_gem_get_gtt_size(dev, size, tiling_mode);
}
static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int ret;

	if (drm_vma_node_has_offset(&obj->base.vma_node))
		return 0;

	dev_priv->mm.shrinker_no_lock_stealing = true;

	ret = drm_gem_create_mmap_offset(&obj->base);
	if (ret != -ENOSPC)
		goto out;

	/* Badly fragmented mmap space? The only way we can recover
	 * space is by destroying unwanted objects. We can't randomly release
	 * mmap_offsets as userspace expects them to be persistent for the
	 * lifetime of the objects. The closest we can do is to release the
	 * offsets on purgeable objects by truncating them and marking them
	 * purged, which prevents userspace from ever using those objects again.
	 */
	i915_gem_shrink(dev_priv,
			obj->base.size >> PAGE_SHIFT,
			I915_SHRINK_BOUND |
			I915_SHRINK_UNBOUND |
			I915_SHRINK_PURGEABLE);
	ret = drm_gem_create_mmap_offset(&obj->base);
	if (ret != -ENOSPC)
		goto out;

	i915_gem_shrink_all(dev_priv);
	ret = drm_gem_create_mmap_offset(&obj->base);
out:
	dev_priv->mm.shrinker_no_lock_stealing = false;

	return ret;
}

static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
}
int
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
		  uint32_t handle,
		  uint64_t *offset)
{
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
		ret = -EFAULT;
		goto out;
	}

	ret = i915_gem_object_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->base.vma_node);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_gem_mmap_gtt *args = data;

	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
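/*
 * Example (editor's illustrative sketch of the userspace side): fetch the
 * fake offset and hand it to a plain mmap() on the DRM fd, which is the
 * pattern the comment above recommends. fd, handle and size are assumed;
 * gtt_mmap() is a made-up name.
 */
#if 0
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

static void *gtt_mmap(int fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
		return NULL;

	/* The returned "offset" is only a token for drm_gem_mmap(). */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, arg.offset);
}
#endif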
/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	i915_gem_object_free_mmap_offset(obj);

	if (obj->base.filp == NULL)
		return;

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->madv = __I915_MADV_PURGED;
}

/* Try to discard unwanted pages */
static void
i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping;

	switch (obj->madv) {
	case I915_MADV_DONTNEED:
		i915_gem_object_truncate(obj);
		/* fall through: a truncated object is already purged */
	case __I915_MADV_PURGED:
		return;
	}

	if (obj->base.filp == NULL)
		return;

	mapping = file_inode(obj->base.filp)->i_mapping;
	invalidate_mapping_pages(mapping, 0, (loff_t)-1);
}
static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct sg_page_iter sg_iter;
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		WARN_ON(ret != -EIO);
		i915_gem_clflush_object(obj, true);
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	i915_gem_gtt_finish_object(obj);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj);

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (obj->dirty)
			set_page_dirty(page);

		if (obj->madv == I915_MADV_WILLNEED)
			mark_page_accessed(page);

		page_cache_release(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}
int
i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	const struct drm_i915_gem_object_ops *ops = obj->ops;

	if (obj->pages == NULL)
		return 0;

	if (obj->pages_pin_count)
		return -EBUSY;

	BUG_ON(i915_gem_obj_bound_any(obj));

	/* ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early. */
	list_del(&obj->global_list);

	ops->put_pages(obj);
	obj->pages = NULL;

	i915_gem_object_invalidate(obj);

	return 0;
}
static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int page_count, i;
	struct address_space *mapping;
	struct sg_table *st;
	struct scatterlist *sg;
	struct sg_page_iter sg_iter;
	struct page *page;
	unsigned long last_pfn = 0;	/* suppress gcc warning */
	int ret;
	gfp_t gfp;

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	page_count = obj->base.size / PAGE_SIZE;
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Get the list of pages out of our struct file. They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
	mapping = file_inode(obj->base.filp)->i_mapping;
	gfp = mapping_gfp_mask(mapping);
	gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
	gfp &= ~(__GFP_IO | __GFP_WAIT);
	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		if (IS_ERR(page)) {
			i915_gem_shrink(dev_priv,
					page_count,
					I915_SHRINK_BOUND |
					I915_SHRINK_UNBOUND |
					I915_SHRINK_PURGEABLE);
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		}
		if (IS_ERR(page)) {
			/* We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 */
			i915_gem_shrink_all(dev_priv);
			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				goto err_pages;
			}
		}
#ifdef CONFIG_SWIOTLB
		if (swiotlb_nr_tbl()) {
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
			sg = sg_next(sg);
			continue;
		}
#endif
		if (!i || page_to_pfn(page) != last_pfn + 1) {
			if (i)
				sg = sg_next(sg);
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
	}
#ifdef CONFIG_SWIOTLB
	if (!swiotlb_nr_tbl())
#endif
		sg_mark_end(sg);
	obj->pages = st;

	ret = i915_gem_gtt_prepare_object(obj);
	if (ret)
		goto err_pages;

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj);

	if (obj->tiling_mode != I915_TILING_NONE &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		i915_gem_object_pin_pages(obj);

	return 0;

err_pages:
	sg_mark_end(sg);
	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
		page_cache_release(sg_page_iter_page(&sg_iter));
	sg_free_table(st);
	kfree(st);

	/* shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ret;
}
/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_get_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_put_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int
i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	const struct drm_i915_gem_object_ops *ops = obj->ops;
	int ret;

	if (obj->pages)
		return 0;

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_DEBUG("Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	BUG_ON(obj->pages_pin_count);

	ret = ops->get_pages(obj);
	if (ret)
		return ret;

	list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);

	obj->get_page.sg = obj->pages->sgl;
	obj->get_page.last = 0;

	return 0;
}
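/*
 * Example (editor's illustrative sketch, not part of the driver): the usual
 * pattern for a caller that needs the backing store resident for a while is
 * to get the pages and then pin them so the shrinker cannot reap them.
 * use_pages_briefly() is a made-up name; struct_mutex is assumed held.
 */
#if 0
static int use_pages_briefly(struct drm_i915_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);	/* bumps pages_pin_count */

	/* ... access obj->pages here ... */

	i915_gem_object_unpin_pages(obj);
	return 0;
}
#endif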
void i915_vma_move_to_active(struct i915_vma *vma,
			     struct drm_i915_gem_request *req)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct intel_engine_cs *ring;

	ring = i915_gem_request_get_ring(req);

	/* Add a reference if we're newly entering the active list. */
	if (obj->active == 0)
		drm_gem_object_reference(&obj->base);
	obj->active |= intel_ring_flag(ring);

	list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
	i915_gem_request_assign(&obj->last_read_req[ring->id], req);

	list_move_tail(&vma->mm_list, &vma->vm->active_list);
}
static void
i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
{
	RQ_BUG_ON(obj->last_write_req == NULL);
	RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->ring)));

	i915_gem_request_assign(&obj->last_write_req, NULL);
	intel_fb_obj_flush(obj, true, ORIGIN_CS);
}

static void
i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
{
	struct i915_vma *vma;

	RQ_BUG_ON(obj->last_read_req[ring] == NULL);
	RQ_BUG_ON(!(obj->active & (1 << ring)));

	list_del_init(&obj->ring_list[ring]);
	i915_gem_request_assign(&obj->last_read_req[ring], NULL);

	if (obj->last_write_req && obj->last_write_req->ring->id == ring)
		i915_gem_object_retire__write(obj);

	obj->active &= ~(1 << ring);
	if (obj->active)
		return;

	/* Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to, of course!)
	 */
	list_move_tail(&obj->global_list,
		       &to_i915(obj->base.dev)->mm.bound_list);

	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (!list_empty(&vma->mm_list))
			list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
	}

	i915_gem_request_assign(&obj->last_fenced_req, NULL);
	drm_gem_object_unreference(&obj->base);
}
static int
i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int ret, i, j;

	/* Carefully retire all requests without writing to the rings */
	for_each_ring(ring, dev_priv, i) {
		ret = intel_ring_idle(ring);
		if (ret)
			return ret;
	}
	i915_gem_retire_requests(dev);

	/* Finally reset hw state */
	for_each_ring(ring, dev_priv, i) {
		intel_ring_init_seqno(ring, seqno);

		for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
			ring->semaphore.sync_seqno[j] = 0;
	}

	return 0;
}

int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (seqno == 0)
		return -EINVAL;

	/* HWS page needs to be set less than what we
	 * will inject to ring
	 */
	ret = i915_gem_init_seqno(dev, seqno - 1);
	if (ret)
		return ret;

	/* Carefully set the last_seqno value so that wrap
	 * detection still works
	 */
	dev_priv->next_seqno = seqno;
	dev_priv->last_seqno = seqno - 1;
	if (dev_priv->last_seqno == 0)
		dev_priv->last_seqno--;

	return 0;
}

int
i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* reserve 0 for non-seqno */
	if (dev_priv->next_seqno == 0) {
		int ret = i915_gem_init_seqno(dev, 0);
		if (ret)
			return ret;

		dev_priv->next_seqno = 1;
	}

	*seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
	return 0;
}
/*
 * NB: This function is not allowed to fail. Doing so would mean that the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
void __i915_add_request(struct drm_i915_gem_request *request,
			struct drm_i915_gem_object *obj,
			bool flush_caches)
{
	struct intel_engine_cs *ring;
	struct drm_i915_private *dev_priv;
	struct intel_ringbuffer *ringbuf;
	u32 request_start;
	int ret;

	if (WARN_ON(request == NULL))
		return;

	ring = request->ring;
	dev_priv = ring->dev->dev_private;
	ringbuf = request->ringbuf;

	/*
	 * To ensure that this call will not fail, space for its emissions
	 * should already have been reserved in the ring buffer. Let the ring
	 * know that it is time to use that space up.
	 */
	intel_ring_reserved_space_use(ringbuf);

	request_start = intel_ring_get_tail(ringbuf);
	/*
	 * Emit any outstanding flushes - execbuf can fail to emit the flush
	 * after having emitted the batchbuffer command. Hence we need to fix
	 * things up similar to emitting the lazy request. The difference here
	 * is that the flush _must_ happen before the next request, no matter
	 * what.
	 */
	if (flush_caches) {
		if (i915.enable_execlists)
			ret = logical_ring_flush_all_caches(request);
		else
			ret = intel_ring_flush_all_caches(request);
		/* Not allowed to fail! */
		WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
	}

	/* Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	request->postfix = intel_ring_get_tail(ringbuf);

	if (i915.enable_execlists)
		ret = ring->emit_request(request);
	else {
		ret = ring->add_request(request);

		request->tail = intel_ring_get_tail(ringbuf);
	}
	/* Not allowed to fail! */
	WARN(ret, "emit|add_request failed: %d!\n", ret);

	request->head = request_start;

	/* Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
	request->batch_obj = obj;

	request->emitted_jiffies = jiffies;
	ring->last_submitted_seqno = request->seqno;
	list_add_tail(&request->list, &ring->request_list);

	trace_i915_gem_request_add(request);

	i915_queue_hangcheck(ring->dev);

	queue_delayed_work(dev_priv->wq,
			   &dev_priv->mm.retire_work,
			   round_jiffies_up_relative(HZ));
	intel_mark_busy(dev_priv->dev);

	/* Sanity check that the reserved size was large enough. */
	intel_ring_reserved_space_end(ringbuf);
}
static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
				   const struct intel_context *ctx)
{
	unsigned long elapsed;

	elapsed = get_seconds() - ctx->hang_stats.guilty_ts;

	if (ctx->hang_stats.banned)
		return true;

	if (ctx->hang_stats.ban_period_seconds &&
	    elapsed <= ctx->hang_stats.ban_period_seconds) {
		if (!i915_gem_context_is_default(ctx)) {
			DRM_DEBUG("context hanging too fast, banning!\n");
			return true;
		} else if (i915_stop_ring_allow_ban(dev_priv)) {
			if (i915_stop_ring_allow_warn(dev_priv))
				DRM_ERROR("gpu hanging too fast, banning!\n");
			return true;
		}
	}

	return false;
}

static void i915_set_reset_status(struct drm_i915_private *dev_priv,
				  struct intel_context *ctx,
				  const bool guilty)
{
	struct i915_ctx_hang_stats *hs;

	if (WARN_ON(!ctx))
		return;

	hs = &ctx->hang_stats;

	if (guilty) {
		hs->banned = i915_context_is_banned(dev_priv, ctx);
		hs->batch_active++;
		hs->guilty_ts = get_seconds();
	} else {
		hs->batch_pending++;
	}
}
void i915_gem_request_free(struct kref *req_ref)
{
	struct drm_i915_gem_request *req = container_of(req_ref,
						 typeof(*req), ref);
	struct intel_context *ctx = req->ctx;

	if (req->file_priv)
		i915_gem_request_remove_from_client(req);

	if (ctx) {
		if (i915.enable_execlists) {
			if (ctx != req->ring->default_context)
				intel_lr_context_unpin(req);
		}

		i915_gem_context_unreference(ctx);
	}

	kmem_cache_free(req->i915->requests, req);
}
int i915_gem_request_alloc(struct intel_engine_cs *ring,
			   struct intel_context *ctx,
			   struct drm_i915_gem_request **req_out)
{
	struct drm_i915_private *dev_priv = to_i915(ring->dev);
	struct drm_i915_gem_request *req;
	int ret;

	if (!req_out)
		return -EINVAL;

	*req_out = NULL;

	req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
	if (req == NULL)
		return -ENOMEM;

	ret = i915_gem_get_seqno(ring->dev, &req->seqno);
	if (ret)
		goto err;

	kref_init(&req->ref);
	req->i915 = dev_priv;
	req->ring = ring;
	req->ctx = ctx;
	i915_gem_context_reference(req->ctx);

	if (i915.enable_execlists)
		ret = intel_logical_ring_alloc_request_extras(req);
	else
		ret = intel_ring_alloc_request_extras(req);
	if (ret) {
		i915_gem_context_unreference(req->ctx);
		goto err;
	}

	/*
	 * Reserve space in the ring buffer for all the commands required to
	 * eventually emit this request. This is to guarantee that the
	 * i915_add_request() call can't fail. Note that the reserve may need
	 * to be redone if the request is not actually submitted straight
	 * away, e.g. because a GPU scheduler has deferred it.
	 */
	if (i915.enable_execlists)
		ret = intel_logical_ring_reserve_space(req);
	else
		ret = intel_ring_reserve_space(req);
	if (ret) {
		/*
		 * At this point, the request is fully allocated even if not
		 * fully prepared. Thus it can be cleaned up using the proper
		 * free code.
		 */
		i915_gem_request_cancel(req);
		return ret;
	}

	*req_out = req;
	return 0;

err:
	kmem_cache_free(dev_priv->requests, req);
	return ret;
}
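/*
 * Example (editor's illustrative sketch, not part of the driver): the
 * request lifecycle implied by the reservation described above.
 * submit_nop_request() is a made-up name; ring/ctx are assumed valid and
 * struct_mutex held.
 */
#if 0
static int submit_nop_request(struct intel_engine_cs *ring,
			      struct intel_context *ctx)
{
	struct drm_i915_gem_request *req;
	int ret;

	ret = i915_gem_request_alloc(ring, ctx, &req);
	if (ret)
		return ret;

	/* Commands for the request would be emitted here. If that emission
	 * failed, the reserved ring space would have to be released with
	 * i915_gem_request_cancel(req) instead of adding the request.
	 */

	/* Hands the request to the hardware; not allowed to fail. */
	__i915_add_request(req, NULL, true);
	return 0;
}
#endif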
void i915_gem_request_cancel(struct drm_i915_gem_request *req)
{
	intel_ring_reserved_space_cancel(req->ringbuf);

	i915_gem_request_unreference(req);
}

struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *ring)
{
	struct drm_i915_gem_request *request;

	list_for_each_entry(request, &ring->request_list, list) {
		if (i915_gem_request_completed(request, false))
			continue;

		return request;
	}

	return NULL;
}
static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
				       struct intel_engine_cs *ring)
{
	struct drm_i915_gem_request *request;
	bool ring_hung;

	request = i915_gem_find_active_request(ring);

	if (request == NULL)
		return;

	ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;

	i915_set_reset_status(dev_priv, request->ctx, ring_hung);

	list_for_each_entry_continue(request, &ring->request_list, list)
		i915_set_reset_status(dev_priv, request->ctx, false);
}

static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
					struct intel_engine_cs *ring)
{
	while (!list_empty(&ring->active_list)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&ring->active_list,
				       struct drm_i915_gem_object,
				       ring_list[ring->id]);

		i915_gem_object_retire__read(obj, ring->id);
	}

	/*
	 * Clear the execlists queue up before freeing the requests, as those
	 * are the ones that keep the context and ringbuffer backing objects
	 * pinned in place.
	 */
	while (!list_empty(&ring->execlist_queue)) {
		struct drm_i915_gem_request *submit_req;

		submit_req = list_first_entry(&ring->execlist_queue,
					      struct drm_i915_gem_request,
					      execlist_link);
		list_del(&submit_req->execlist_link);

		if (submit_req->ctx != ring->default_context)
			intel_lr_context_unpin(submit_req);

		i915_gem_request_unreference(submit_req);
	}

	/*
	 * We must free the requests after all the corresponding objects have
	 * been moved off active lists. Which is the same order as the normal
	 * retire_requests function does. This is important if objects hold
	 * implicit references on things like e.g. ppgtt address spaces through
	 * the request.
	 */
	while (!list_empty(&ring->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&ring->request_list,
					   struct drm_i915_gem_request,
					   list);

		i915_gem_request_retire(request);
	}
}
void i915_gem_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int i;

	/*
	 * Before we free the objects from the requests, we need to inspect
	 * them for finding the guilty party. As the requests only borrow
	 * their reference to the objects, the inspection must be done first.
	 */
	for_each_ring(ring, dev_priv, i)
		i915_gem_reset_ring_status(dev_priv, ring);

	for_each_ring(ring, dev_priv, i)
		i915_gem_reset_ring_cleanup(dev_priv, ring);

	i915_gem_context_reset(dev);

	i915_gem_restore_fences(dev);

	WARN_ON(i915_verify_lists(dev));
}
/**
 * This function clears the request list as sequence numbers are passed.
 */
void
i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
{
	WARN_ON(i915_verify_lists(ring->dev));

	/* Retire requests first as we use it above for the early return.
	 * If we retire requests last, we may use a later seqno and so clear
	 * the requests lists without clearing the active list, leading to
	 * confusion.
	 */
	while (!list_empty(&ring->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&ring->request_list,
					   struct drm_i915_gem_request,
					   list);

		if (!i915_gem_request_completed(request, true))
			break;

		i915_gem_request_retire(request);
	}

	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate,
	 * before we free the context associated with the requests.
	 */
	while (!list_empty(&ring->active_list)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&ring->active_list,
				       struct drm_i915_gem_object,
				       ring_list[ring->id]);

		if (!list_empty(&obj->last_read_req[ring->id]->list))
			break;

		i915_gem_object_retire__read(obj, ring->id);
	}

	if (unlikely(ring->trace_irq_req &&
		     i915_gem_request_completed(ring->trace_irq_req, true))) {
		ring->irq_put(ring);
		i915_gem_request_assign(&ring->trace_irq_req, NULL);
	}

	WARN_ON(i915_verify_lists(ring->dev));
}
bool
i915_gem_retire_requests(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	bool idle = true;
	int i;

	for_each_ring(ring, dev_priv, i) {
		i915_gem_retire_requests_ring(ring);
		idle &= list_empty(&ring->request_list);
		if (i915.enable_execlists) {
			unsigned long flags;

			spin_lock_irqsave(&ring->execlist_lock, flags);
			idle &= list_empty(&ring->execlist_queue);
			spin_unlock_irqrestore(&ring->execlist_lock, flags);

			intel_execlists_retire_requests(ring);
		}
	}

	if (idle)
		mod_delayed_work(dev_priv->wq,
				 &dev_priv->mm.idle_work,
				 msecs_to_jiffies(100));

	return idle;
}
static void
i915_gem_retire_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), mm.retire_work.work);
	struct drm_device *dev = dev_priv->dev;
	bool idle;

	/* Come back later if the device is busy... */
	idle = false;
	if (mutex_trylock(&dev->struct_mutex)) {
		idle = i915_gem_retire_requests(dev);
		mutex_unlock(&dev->struct_mutex);
	}
	if (!idle)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
				   round_jiffies_up_relative(HZ));
}

static void
i915_gem_idle_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), mm.idle_work.work);
	struct drm_device *dev = dev_priv->dev;
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		if (!list_empty(&ring->request_list))
			return;

	intel_mark_idle(dev);

	if (mutex_trylock(&dev->struct_mutex)) {
		struct intel_engine_cs *ring;
		int i;

		for_each_ring(ring, dev_priv, i)
			i915_gem_batch_pool_fini(&ring->batch_pool);

		mutex_unlock(&dev->struct_mutex);
	}
}
/**
 * Ensures that an object will eventually get non-busy by flushing any required
 * write domains, emitting any outstanding lazy request and retiring any
 * completed requests.
 */
static int
i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
{
	int i;

	if (!obj->active)
		return 0;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct drm_i915_gem_request *req;

		req = obj->last_read_req[i];
		if (req == NULL)
			continue;

		if (list_empty(&req->list))
			goto retire;

		if (i915_gem_request_completed(req, true)) {
			__i915_gem_request_retire__upto(req);
retire:
			i915_gem_object_retire__read(obj, i);
		}
	}

	return 0;
}
/**
 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
 * @DRM_IOCTL_ARGS: standard ioctl arguments
 *
 * Returns 0 if successful, else an error is returned with the remaining time in
 * the timeout parameter.
 * -ETIME: object is still busy after timeout
 * -ERESTARTSYS: signal interrupted the wait
 * -ENOENT: object doesn't exist
 * Also possible, but rare:
 * -EAGAIN: GPU wedged
 * -ENOMEM: damn
 * -ENODEV: Internal IRQ fail
 * -E?: The add request failed
 *
 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
 * non-zero timeout parameter the wait ioctl will wait for the given number of
 * nanoseconds on an object becoming unbusy. Since the wait itself does so
 * without holding struct_mutex the object may become re-busied before this
 * function completes. A similar but shorter race condition exists in the busy
 * ioctl.
 */
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_wait *args = data;
	struct drm_i915_gem_object *obj;
	struct drm_i915_gem_request *req[I915_NUM_RINGS];
	unsigned reset_counter;
	int i, n = 0;
	int ret;

	if (args->flags != 0)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
	if (&obj->base == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOENT;
	}

	/* Need to make sure the object gets inactive eventually. */
	ret = i915_gem_object_flush_active(obj);
	if (ret)
		goto out;

	if (!obj->active)
		goto out;

	/* Do this after OLR check to make sure we make forward progress polling
	 * on this IOCTL with a timeout == 0 (like busy ioctl)
	 */
	if (args->timeout_ns == 0) {
		ret = -ETIME;
		goto out;
	}

	drm_gem_object_unreference(&obj->base);
	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);

	for (i = 0; i < I915_NUM_RINGS; i++) {
		if (obj->last_read_req[i] == NULL)
			continue;

		req[n++] = i915_gem_request_reference(obj->last_read_req[i]);
	}

	mutex_unlock(&dev->struct_mutex);

	for (i = 0; i < n; i++) {
		if (ret == 0)
			ret = __i915_wait_request(req[i], reset_counter, true,
						  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
						  file->driver_priv);
		i915_gem_request_unreference__unlocked(req[i]);
	}
	return ret;

out:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
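/*
 * Example (editor's illustrative sketch of the userspace side): waiting on
 * a buffer through this ioctl. A timeout of 0 behaves like the busy ioctl,
 * as described above. fd and handle are assumed to describe an existing
 * object; wait_bo() is a made-up name.
 */
#if 0
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int wait_bo(int fd, uint32_t handle, int64_t timeout_ns)
{
	struct drm_i915_gem_wait wait = {
		.bo_handle = handle,
		.timeout_ns = timeout_ns,	/* 0 polls, like the busy ioctl */
	};

	/* On -ETIME, wait.timeout_ns holds the remaining time. */
	return ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
}
#endif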
static int
__i915_gem_object_sync(struct drm_i915_gem_object *obj,
		       struct intel_engine_cs *to,
		       struct drm_i915_gem_request *from_req,
		       struct drm_i915_gem_request **to_req)
{
	struct intel_engine_cs *from;
	int ret;

	from = i915_gem_request_get_ring(from_req);
	if (to == from)
		return 0;

	if (i915_gem_request_completed(from_req, true))
		return 0;

	if (!i915_semaphore_is_enabled(obj->base.dev)) {
		struct drm_i915_private *i915 = to_i915(obj->base.dev);
		ret = __i915_wait_request(from_req,
					  atomic_read(&i915->gpu_error.reset_counter),
					  i915->mm.interruptible,
					  NULL,
					  &i915->rps.semaphores);
		if (ret)
			return ret;

		i915_gem_object_retire_request(obj, from_req);
	} else {
		int idx = intel_ring_sync_index(from, to);
		u32 seqno = i915_gem_request_get_seqno(from_req);

		WARN_ON(!to_req);

		if (seqno <= from->semaphore.sync_seqno[idx])
			return 0;

		if (*to_req == NULL) {
			ret = i915_gem_request_alloc(to, to->default_context, to_req);
			if (ret)
				return ret;
		}

		trace_i915_gem_ring_sync_to(*to_req, from, from_req);
		ret = to->semaphore.sync_to(*to_req, from, seqno);
		if (ret)
			return ret;

		/* We use last_read_req because sync_to()
		 * might have just caused seqno wrap under
		 * the radar.
		 */
		from->semaphore.sync_seqno[idx] =
			i915_gem_request_get_seqno(obj->last_read_req[from->id]);
	}

	return 0;
}
  2568. /**
  2569. * i915_gem_object_sync - sync an object to a ring.
  2570. *
  2571. * @obj: object which may be in use on another ring.
  2572. * @to: ring we wish to use the object on. May be NULL.
  2573. * @to_req: request we wish to use the object for. See below.
  2574. * This will be allocated and returned if a request is
  2575. * required but not passed in.
  2576. *
  2577. * This code is meant to abstract object synchronization with the GPU.
  2578. * Calling with NULL implies synchronizing the object with the CPU
  2579. * rather than a particular GPU ring. Conceptually we serialise writes
  2580. * between engines inside the GPU. We only allow one engine to write
  2581. * into a buffer at any time, but multiple readers. To ensure each has
  2582. * a coherent view of memory, we must:
  2583. *
  2584. * - If there is an outstanding write request to the object, the new
  2585. * request must wait for it to complete (either CPU or in hw, requests
  2586. * on the same ring will be naturally ordered).
  2587. *
  2588. * - If we are a write request (pending_write_domain is set), the new
  2589. * request must wait for outstanding read requests to complete.
  2590. *
  2591. * For CPU synchronisation (NULL to) no request is required. For syncing with
  2592. * rings to_req must be non-NULL. However, a request does not have to be
  2593. * pre-allocated. If *to_req is NULL and sync commands will be emitted then a
  2594. * request will be allocated automatically and returned through *to_req. Note
  2595. * that it is not guaranteed that commands will be emitted (because the system
  2596. * might already be idle). Hence there is no need to create a request that
  2597. * might never have any work submitted. Note further that if a request is
  2598. * returned in *to_req, it is the responsibility of the caller to submit
  2599. * that request (after potentially adding more work to it).
  2600. *
  2601. * Returns 0 if successful, else propagates up the lower layer error.
  2602. */
  2603. int
  2604. i915_gem_object_sync(struct drm_i915_gem_object *obj,
  2605. struct intel_engine_cs *to,
  2606. struct drm_i915_gem_request **to_req)
  2607. {
  2608. const bool readonly = obj->base.pending_write_domain == 0;
  2609. struct drm_i915_gem_request *req[I915_NUM_RINGS];
  2610. int ret, i, n;
  2611. if (!obj->active)
  2612. return 0;
  2613. if (to == NULL)
  2614. return i915_gem_object_wait_rendering(obj, readonly);
  2615. n = 0;
  2616. if (readonly) {
  2617. if (obj->last_write_req)
  2618. req[n++] = obj->last_write_req;
  2619. } else {
  2620. for (i = 0; i < I915_NUM_RINGS; i++)
  2621. if (obj->last_read_req[i])
  2622. req[n++] = obj->last_read_req[i];
  2623. }
  2624. for (i = 0; i < n; i++) {
  2625. ret = __i915_gem_object_sync(obj, to, req[i], to_req);
  2626. if (ret)
  2627. return ret;
  2628. }
  2629. return 0;
  2630. }
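
/*
 * Drop the object out of the GTT domain ahead of unbinding: revoke any CPU
 * mmaps so the next user access refaults, then update the domain tracking.
 */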
static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
{
	u32 old_write_domain, old_read_domains;

	/* Force a pagefault for domain tracking on next user access */
	i915_gem_release_mmap(obj);

	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
		return;

	/* Wait for any direct GTT access to complete */
	mb();

	old_read_domains = obj->base.read_domains;
	old_write_domain = obj->base.write_domain;

	obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
	obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);
}
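
/*
 * Unbind a vma from its address space: wait for rendering, flush the GTT
 * domain and drop any fence for normal GGTT views, then remove the vma
 * from the VM and release its hold on the object's backing pages.
 */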
int i915_vma_unbind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int ret;

	if (list_empty(&vma->vma_link))
		return 0;

	if (!drm_mm_node_allocated(&vma->node)) {
		i915_gem_vma_destroy(vma);
		return 0;
	}

	if (vma->pin_count)
		return -EBUSY;

	BUG_ON(obj->pages == NULL);

	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;
	/* Continue on if we fail due to EIO, the GPU is hung so we
	 * should be safe and we need to cleanup or else we might
	 * cause memory corruption through use-after-free.
	 */

	if (i915_is_ggtt(vma->vm) &&
	    vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
		i915_gem_object_finish_gtt(obj);

		/* release the fence reg _after_ flushing */
		ret = i915_gem_object_put_fence(obj);
		if (ret)
			return ret;
	}

	trace_i915_vma_unbind(vma);

	vma->vm->unbind_vma(vma);
	vma->bound = 0;

	list_del_init(&vma->mm_list);
	if (i915_is_ggtt(vma->vm)) {
		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
			obj->map_and_fenceable = false;
		} else if (vma->ggtt_view.pages) {
			sg_free_table(vma->ggtt_view.pages);
			kfree(vma->ggtt_view.pages);
		}
		vma->ggtt_view.pages = NULL;
	}

	drm_mm_remove_node(&vma->node);
	i915_gem_vma_destroy(vma);

	/* Since the unbound list is global, only move to that list if
	 * no more VMAs exist. */
	if (list_empty(&obj->vma_list))
		list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);

	/* And finally now the object is completely decoupled from this vma,
	 * we can drop its hold on the backing storage and allow it to be
	 * reaped by the shrinker.
	 */
	i915_gem_object_unpin_pages(obj);

	return 0;
}
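
/*
 * Flush all outstanding work and wait for every ring to drain; on legacy
 * submission this also switches each ring back to its default context.
 */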
int i915_gpu_idle(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int ret, i;

	/* Flush everything onto the inactive list. */
	for_each_ring(ring, dev_priv, i) {
		if (!i915.enable_execlists) {
			struct drm_i915_gem_request *req;

			ret = i915_gem_request_alloc(ring, ring->default_context, &req);
			if (ret)
				return ret;

			ret = i915_switch_context(req);
			if (ret) {
				i915_gem_request_cancel(req);
				return ret;
			}

			i915_add_request_no_flush(req);
		}

		ret = intel_ring_idle(ring);
		if (ret)
			return ret;
	}

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
				     unsigned long cache_level)
{
	struct drm_mm_node *gtt_space = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	if (!drm_mm_node_allocated(gtt_space))
		return true;

	if (list_empty(&gtt_space->node_list))
		return true;

	other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
	if (other->allocated && !other->hole_follows && other->color != cache_level)
		return false;

	other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
	if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
		return false;

	return true;
}

/**
 * Finds free space in the GTT aperture and binds the object or a view of it
 * there.
 */
static struct i915_vma *
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
			   struct i915_address_space *vm,
			   const struct i915_ggtt_view *ggtt_view,
			   unsigned alignment,
			   uint64_t flags)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 size, fence_size, fence_alignment, unfenced_alignment;
	u64 start =
		flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	u64 end =
		flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
	struct i915_vma *vma;
	int ret;

	if (i915_is_ggtt(vm)) {
		u32 view_size;

		if (WARN_ON(!ggtt_view))
			return ERR_PTR(-EINVAL);

		view_size = i915_ggtt_view_size(obj, ggtt_view);

		fence_size = i915_gem_get_gtt_size(dev,
						   view_size,
						   obj->tiling_mode);
		fence_alignment = i915_gem_get_gtt_alignment(dev,
							     view_size,
							     obj->tiling_mode,
							     true);
		unfenced_alignment = i915_gem_get_gtt_alignment(dev,
								view_size,
								obj->tiling_mode,
								false);
		size = flags & PIN_MAPPABLE ? fence_size : view_size;
	} else {
		fence_size = i915_gem_get_gtt_size(dev,
						   obj->base.size,
						   obj->tiling_mode);
		fence_alignment = i915_gem_get_gtt_alignment(dev,
							     obj->base.size,
							     obj->tiling_mode,
							     true);
		unfenced_alignment =
			i915_gem_get_gtt_alignment(dev,
						   obj->base.size,
						   obj->tiling_mode,
						   false);
		size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
	}

	if (alignment == 0)
		alignment = flags & PIN_MAPPABLE ? fence_alignment :
						   unfenced_alignment;
	if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
		DRM_DEBUG("Invalid object (view type=%u) alignment requested %u\n",
			  ggtt_view ? ggtt_view->type : 0,
			  alignment);
		return ERR_PTR(-EINVAL);
	}

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%u > %s aperture=%llu\n",
			  ggtt_view ? ggtt_view->type : 0,
			  size,
			  flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return ERR_PTR(-E2BIG);
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ERR_PTR(ret);

	i915_gem_object_pin_pages(obj);

	vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
			  i915_gem_obj_lookup_or_create_vma(obj, vm);
	if (IS_ERR(vma))
		goto err_unpin;

search_free:
	ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
						  size, alignment,
						  obj->cache_level,
						  start, end,
						  DRM_MM_SEARCH_DEFAULT,
						  DRM_MM_CREATE_DEFAULT);
	if (ret) {
		ret = i915_gem_evict_something(dev, vm, size, alignment,
					       obj->cache_level,
					       start, end,
					       flags);
		if (ret == 0)
			goto search_free;

		goto err_free_vma;
	}
	if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
		ret = -EINVAL;
		goto err_remove_node;
	}

	trace_i915_vma_bind(vma, flags);
	ret = i915_vma_bind(vma, obj->cache_level, flags);
	if (ret)
		goto err_remove_node;

	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
	list_add_tail(&vma->mm_list, &vm->inactive_list);

	return vma;

err_remove_node:
	drm_mm_remove_node(&vma->node);
err_free_vma:
	i915_gem_vma_destroy(vma);
	vma = ERR_PTR(ret);
err_unpin:
	i915_gem_object_unpin_pages(obj);
	return vma;
}
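
/**
 * i915_gem_clflush_object - flush the CPU cachelines backing an object
 * @obj: object whose backing pages should be flushed
 * @force: flush even when the cache domain is believed to be coherent
 *
 * Returns true if any cachelines were actually flushed, so the caller
 * knows whether a chipset flush is also required.
 */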
bool
i915_gem_clflush_object(struct drm_i915_gem_object *obj,
			bool force)
{
	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj->pages == NULL)
		return false;

	/*
	 * Stolen memory is always coherent with the GPU as it is explicitly
	 * marked as wc by the system, or the system is cache-coherent.
	 */
	if (obj->stolen || obj->phys_handle)
		return false;

	/* If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines. However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated. As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
	if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
		obj->cache_dirty = true;
		return false;
	}

	trace_i915_gem_object_clflush(obj);
	drm_clflush_sg(obj->pages);
	obj->cache_dirty = false;

	return true;
}

/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
{
	uint32_t old_write_domain;

	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
		return;

	/* No actual flushing is required for the GTT write domain. Writes
	 * to it immediately go to main memory as far as we know, so there's
	 * no chipset flush. It also doesn't land in render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 */
	wmb();

	old_write_domain = obj->base.write_domain;
	obj->base.write_domain = 0;

	intel_fb_obj_flush(obj, false, ORIGIN_GTT);

	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    old_write_domain);
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
{
	uint32_t old_write_domain;

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
		return;

	if (i915_gem_clflush_object(obj, obj->pin_display))
		i915_gem_chipset_flush(obj->base.dev);

	old_write_domain = obj->base.write_domain;
	obj->base.write_domain = 0;

	intel_fb_obj_flush(obj, false, ORIGIN_CPU);

	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    old_write_domain);
}

/**
 * Moves a single object to the GTT read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
	uint32_t old_write_domain, old_read_domains;
	struct i915_vma *vma;
	int ret;

	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
		return 0;

	ret = i915_gem_object_wait_rendering(obj, !write);
	if (ret)
		return ret;

	/* Flush and acquire obj->pages so that we are coherent through
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_flush_cpu_write_domain(obj);

	/* Serialise direct access to this object with the barriers for
	 * coherent writes from the GPU, by effectively invalidating the
	 * GTT domain upon first access.
	 */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
		mb();

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
		obj->dirty = 1;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	/* And bump the LRU for this access */
	vma = i915_gem_obj_to_ggtt(obj);
	if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
		list_move_tail(&vma->mm_list,
			       &to_i915(obj->base.dev)->gtt.base.inactive_list);

	return 0;
}
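
/*
 * Changing the cache level means re-validating every vma against the new
 * GTT colouring, rebinding with updated PTEs, and, on pre-SNB hardware,
 * dropping fences since tiling cannot be combined with snooped memory.
 */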
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	struct i915_vma *vma, *next;
	int ret;

	if (obj->cache_level == cache_level)
		return 0;

	if (i915_gem_obj_is_pinned(obj)) {
		DRM_DEBUG("can not change the cache level of pinned objects\n");
		return -EBUSY;
	}

	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
		if (!i915_gem_valid_gtt_space(vma, cache_level)) {
			ret = i915_vma_unbind(vma);
			if (ret)
				return ret;
		}
	}

	if (i915_gem_obj_bound_any(obj)) {
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;

		i915_gem_object_finish_gtt(obj);

		/* Before SandyBridge, you could not use tiling or fence
		 * registers with snooped memory, so relinquish any fences
		 * currently pointing to our region in the aperture.
		 */
		if (INTEL_INFO(dev)->gen < 6) {
			ret = i915_gem_object_put_fence(obj);
			if (ret)
				return ret;
		}

		list_for_each_entry(vma, &obj->vma_list, vma_link)
			if (drm_mm_node_allocated(&vma->node)) {
				ret = i915_vma_bind(vma, cache_level,
						    PIN_UPDATE);
				if (ret)
					return ret;
			}
	}

	list_for_each_entry(vma, &obj->vma_list, vma_link)
		vma->node.color = cache_level;
	obj->cache_level = cache_level;

	if (obj->cache_dirty &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
	    cpu_write_needs_clflush(obj)) {
		if (i915_gem_clflush_object(obj, true))
			i915_gem_chipset_flush(obj->base.dev);
	}

	return 0;
}

int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL)
		return -ENOENT;

	switch (obj->cache_level) {
	case I915_CACHE_LLC:
	case I915_CACHE_L3_LLC:
		args->caching = I915_CACHING_CACHED;
		break;

	case I915_CACHE_WT:
		args->caching = I915_CACHING_DISPLAY;
		break;

	default:
		args->caching = I915_CACHING_NONE;
		break;
	}

	drm_gem_object_unreference_unlocked(&obj->base);
	return 0;
}

int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;
	enum i915_cache_level level;
	int ret;

	switch (args->caching) {
	case I915_CACHING_NONE:
		level = I915_CACHE_NONE;
		break;
	case I915_CACHING_CACHED:
		level = I915_CACHE_LLC;
		break;
	case I915_CACHING_DISPLAY:
		level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
		break;
	default:
		return -EINVAL;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = i915_gem_object_set_cache_level(obj, level);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/*
 * Prepare buffer for display plane (scanout, cursors, etc).
 * Can be called from an uninterruptible phase (modesetting) and allows
 * any flushes to be pipelined (for pageflips).
 */
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     struct intel_engine_cs *pipelined,
				     struct drm_i915_gem_request **pipelined_request,
				     const struct i915_ggtt_view *view)
{
	u32 old_read_domains, old_write_domain;
	int ret;

	ret = i915_gem_object_sync(obj, pipelined, pipelined_request);
	if (ret)
		return ret;

	/* Mark the pin_display early so that we account for the
	 * display coherency whilst setting up the cache domains.
	 */
	obj->pin_display++;

	/* The display engine is not coherent with the LLC cache on gen6. As
	 * a result, we make sure that the pinning that is about to occur is
	 * done with uncached PTEs. This is lowest common denominator for all
	 * chipsets.
	 *
	 * However for gen6+, we could do better by using the GFDT bit instead
	 * of uncaching, which would allow us to flush all the LLC-cached data
	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
	 */
	ret = i915_gem_object_set_cache_level(obj,
					      HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
	if (ret)
		goto err_unpin_display;

	/* As the user may map the buffer once pinned in the display plane
	 * (e.g. libkms for the bootup splash), we have to ensure that we
	 * always use map_and_fenceable for all scanout buffers.
	 */
	ret = i915_gem_object_ggtt_pin(obj, view, alignment,
				       view->type == I915_GGTT_VIEW_NORMAL ?
				       PIN_MAPPABLE : 0);
	if (ret)
		goto err_unpin_display;

	i915_gem_object_flush_cpu_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	obj->base.write_domain = 0;
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;

err_unpin_display:
	obj->pin_display--;
	return ret;
}

void
i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
					 const struct i915_ggtt_view *view)
{
	if (WARN_ON(obj->pin_display == 0))
		return;

	i915_gem_object_ggtt_unpin_view(obj, view);

	obj->pin_display--;
}

/**
 * Moves a single object to the CPU read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
	uint32_t old_write_domain, old_read_domains;
	int ret;

	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return 0;

	ret = i915_gem_object_wait_rendering(obj, !write);
	if (ret)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj, false);

		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}

/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
	struct drm_i915_gem_request *request, *target = NULL;
	unsigned reset_counter;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
	if (ret)
		return ret;

	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		/*
		 * Note that the request might not have been submitted yet.
		 * In which case emitted_jiffies will be zero.
		 */
		if (!request->emitted_jiffies)
			continue;

		target = request;
	}
	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	if (target)
		i915_gem_request_reference(target);
	spin_unlock(&file_priv->mm.lock);

	if (target == NULL)
		return 0;

	ret = __i915_wait_request(target, reset_counter, true, NULL, NULL);
	if (ret == 0)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);

	i915_gem_request_unreference__unlocked(target);

	return ret;
}
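
/*
 * Check whether an existing vma satisfies the alignment, mappability and
 * offset-bias constraints of a new pin request; if not, the caller must
 * unbind and rebind it.
 */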
static bool
i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
{
	struct drm_i915_gem_object *obj = vma->obj;

	if (alignment &&
	    vma->node.start & (alignment - 1))
		return true;

	if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}
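
/*
 * Common pinning path: look up (or create and bind) the vma for the given
 * address space or GGTT view, rebinding first when the existing placement
 * is unsuitable, and recompute map_and_fenceable when a normal GGTT view
 * gains a global binding.
 */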
static int
i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
		       struct i915_address_space *vm,
		       const struct i915_ggtt_view *ggtt_view,
		       uint32_t alignment,
		       uint64_t flags)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	struct i915_vma *vma;
	unsigned bound;
	int ret;

	if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
		return -ENODEV;

	if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
		return -EINVAL;

	if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
		return -EINVAL;

	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
		return -EINVAL;

	vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
			  i915_gem_obj_to_vma(obj, vm);

	if (IS_ERR(vma))
		return PTR_ERR(vma);

	if (vma) {
		if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
			return -EBUSY;

		if (i915_vma_misplaced(vma, alignment, flags)) {
			unsigned long offset;
			offset = ggtt_view ? i915_gem_obj_ggtt_offset_view(obj, ggtt_view) :
					     i915_gem_obj_offset(obj, vm);
			WARN(vma->pin_count,
			     "bo is already pinned in %s with incorrect alignment:"
			     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
			     " obj->map_and_fenceable=%d\n",
			     ggtt_view ? "ggtt" : "ppgtt",
			     offset,
			     alignment,
			     !!(flags & PIN_MAPPABLE),
			     obj->map_and_fenceable);
			ret = i915_vma_unbind(vma);
			if (ret)
				return ret;

			vma = NULL;
		}
	}

	bound = vma ? vma->bound : 0;
	if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
		vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
						 flags);
		if (IS_ERR(vma))
			return PTR_ERR(vma);
	} else {
		ret = i915_vma_bind(vma, obj->cache_level, flags);
		if (ret)
			return ret;
	}

	if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
	    (bound ^ vma->bound) & GLOBAL_BIND) {
		bool mappable, fenceable;
		u32 fence_size, fence_alignment;

		fence_size = i915_gem_get_gtt_size(obj->base.dev,
						   obj->base.size,
						   obj->tiling_mode);
		fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
							     obj->base.size,
							     obj->tiling_mode,
							     true);

		fenceable = (vma->node.size == fence_size &&
			     (vma->node.start & (fence_alignment - 1)) == 0);

		mappable = (vma->node.start + fence_size <=
			    dev_priv->gtt.mappable_end);

		obj->map_and_fenceable = mappable && fenceable;

		WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
	}

	vma->pin_count++;
	return 0;
}
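
/*
 * Thin wrappers around i915_gem_object_do_pin(): pinning into an arbitrary
 * address space uses the normal GGTT view when the vm is the GGTT, while
 * GGTT pinning takes an explicit view and always implies PIN_GLOBAL.
 */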
int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
		    struct i915_address_space *vm,
		    uint32_t alignment,
		    uint64_t flags)
{
	return i915_gem_object_do_pin(obj, vm,
				      i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
				      alignment, flags);
}

int
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 uint32_t alignment,
			 uint64_t flags)
{
	if (WARN_ONCE(!view, "no view specified"))
		return -EINVAL;

	return i915_gem_object_do_pin(obj, i915_obj_to_ggtt(obj), view,
				      alignment, flags | PIN_GLOBAL);
}

void
i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
				const struct i915_ggtt_view *view)
{
	struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);

	BUG_ON(!vma);
	WARN_ON(vma->pin_count == 0);
	WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));

	--vma->pin_count;
}

int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Count all active objects as busy, even if they are currently not used
	 * by the gpu. Users of this interface expect objects to eventually
	 * become non-busy without any further actions, therefore emit any
	 * necessary flushes here.
	 */
	ret = i915_gem_object_flush_active(obj);
	if (ret)
		goto unref;

	BUILD_BUG_ON(I915_NUM_RINGS > 16);
	args->busy = obj->active << 16;
	if (obj->last_write_req)
		args->busy |= obj->last_write_req->ring->id;

unref:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}

int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (i915_gem_obj_is_pinned(obj)) {
		ret = -EINVAL;
		goto out;
	}

	if (obj->pages &&
	    obj->tiling_mode != I915_TILING_NONE &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		if (obj->madv == I915_MADV_WILLNEED)
			i915_gem_object_unpin_pages(obj);
		if (args->madv == I915_MADV_WILLNEED)
			i915_gem_object_pin_pages(obj);
	}

	if (obj->madv != __I915_MADV_PURGED)
		obj->madv = args->madv;

	/* if the object is no longer attached, discard its backing storage */
	if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
		i915_gem_object_truncate(obj);

	args->retained = obj->madv != __I915_MADV_PURGED;

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
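
/*
 * Initialise the driver-private portion of a freshly allocated GEM object:
 * empty tracking lists, no fence register, and a default madvise state of
 * WILLNEED so the shrinker does not treat the pages as purgeable.
 */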
void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops)
{
	int i;

	INIT_LIST_HEAD(&obj->global_list);
	for (i = 0; i < I915_NUM_RINGS; i++)
		INIT_LIST_HEAD(&obj->ring_list[i]);
	INIT_LIST_HEAD(&obj->obj_exec_link);
	INIT_LIST_HEAD(&obj->vma_list);
	INIT_LIST_HEAD(&obj->batch_pool_link);

	obj->ops = ops;

	obj->fence_reg = I915_FENCE_REG_NONE;
	obj->madv = I915_MADV_WILLNEED;

	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
}

static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
	.get_pages = i915_gem_object_get_pages_gtt,
	.put_pages = i915_gem_object_put_pages_gtt,
};

struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size)
{
	struct drm_i915_gem_object *obj;
	struct address_space *mapping;
	gfp_t mask;

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return NULL;

	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		i915_gem_object_free(obj);
		return NULL;
	}

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = file_inode(obj->base.filp)->i_mapping;
	mapping_set_gfp_mask(mapping, mask);

	i915_gem_object_init(obj, &i915_gem_object_ops);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(dev)) {
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached. Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache. This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		obj->cache_level = I915_CACHE_LLC;
	} else
		obj->cache_level = I915_CACHE_NONE;

	trace_i915_gem_object_create(obj);

	return obj;
}

static bool discard_backing_storage(struct drm_i915_gem_object *obj)
{
	/* If we are the last user of the backing storage (be it shmemfs
	 * pages or stolen etc), we know that the pages are going to be
	 * immediately released. In this case, we can then skip copying
	 * back the contents from the GPU.
	 */
	if (obj->madv != I915_MADV_WILLNEED)
		return false;

	if (obj->base.filp == NULL)
		return true;

	/* At first glance, this looks racy, but then again so would be
	 * userspace racing mmap against close. However, the first external
	 * reference to the filp can only be obtained through the
	 * i915_gem_mmap_ioctl() which safeguards us against the user
	 * acquiring such a reference whilst we are in the middle of
	 * freeing the object.
	 */
	return atomic_long_read(&obj->base.filp->f_count) == 1;
}

void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_vma *vma, *next;

	intel_runtime_pm_get(dev_priv);

	trace_i915_gem_object_destroy(obj);

	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
		int ret;

		vma->pin_count = 0;
		ret = i915_vma_unbind(vma);
		if (WARN_ON(ret == -ERESTARTSYS)) {
			bool was_interruptible;

			was_interruptible = dev_priv->mm.interruptible;
			dev_priv->mm.interruptible = false;

			WARN_ON(i915_vma_unbind(vma));

			dev_priv->mm.interruptible = was_interruptible;
		}
	}

	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
	 * before progressing. */
	if (obj->stolen)
		i915_gem_object_unpin_pages(obj);

	WARN_ON(obj->frontbuffer_bits);

	if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
	    obj->tiling_mode != I915_TILING_NONE)
		i915_gem_object_unpin_pages(obj);

	if (WARN_ON(obj->pages_pin_count))
		obj->pages_pin_count = 0;
	if (discard_backing_storage(obj))
		obj->madv = I915_MADV_DONTNEED;
	i915_gem_object_put_pages(obj);
	i915_gem_object_free_mmap_offset(obj);

	BUG_ON(obj->pages);

	if (obj->base.import_attach)
		drm_prime_gem_destroy(&obj->base, NULL);

	if (obj->ops->release)
		obj->ops->release(obj);

	drm_gem_object_release(&obj->base);
	i915_gem_info_remove_obj(dev_priv, obj->base.size);

	kfree(obj->bit_17);
	i915_gem_object_free(obj);

	intel_runtime_pm_put(dev_priv);
}
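
/*
 * Look up the vma for an object in a given address space. For the GGTT
 * this returns only the normal view; use i915_gem_obj_to_ggtt_view() to
 * find non-normal views.
 */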
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm)
{
	struct i915_vma *vma;
	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (i915_is_ggtt(vma->vm) &&
		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
			continue;
		if (vma->vm == vm)
			return vma;
	}
	return NULL;
}

struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
					   const struct i915_ggtt_view *view)
{
	struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
	struct i915_vma *vma;

	if (WARN_ONCE(!view, "no view specified"))
		return ERR_PTR(-EINVAL);

	list_for_each_entry(vma, &obj->vma_list, vma_link)
		if (vma->vm == ggtt &&
		    i915_ggtt_view_equal(&vma->ggtt_view, view))
			return vma;
	return NULL;
}

void i915_gem_vma_destroy(struct i915_vma *vma)
{
	struct i915_address_space *vm = NULL;
	WARN_ON(vma->node.allocated);

	/* Keep the vma as a placeholder in the execbuffer reservation lists */
	if (!list_empty(&vma->exec_list))
		return;

	vm = vma->vm;

	if (!i915_is_ggtt(vm))
		i915_ppgtt_put(i915_vm_to_ppgtt(vm));

	list_del(&vma->vma_link);

	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
}

static void
i915_gem_stop_ringbuffers(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		dev_priv->gt.stop_ring(ring);
}

int
i915_gem_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gpu_idle(dev);
	if (ret)
		goto err;

	i915_gem_retire_requests(dev);

	i915_gem_stop_ringbuffers(dev);
	mutex_unlock(&dev->struct_mutex);

	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
	flush_delayed_work(&dev_priv->mm.idle_work);

	/* Assert that we successfully flushed all the work and
	 * reset the GPU back to its idle, low power state.
	 */
	WARN_ON(dev_priv->mm.busy);

	return 0;

err:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
{
	struct intel_engine_cs *ring = req->ring;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
	u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
	int i, ret;

	if (!HAS_L3_DPF(dev) || !remap_info)
		return 0;

	ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
	if (ret)
		return ret;

	/*
	 * Note: We do not worry about the concurrent register cacheline hang
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, reg_base + i);
		intel_ring_emit(ring, remap_info[i/4]);
	}

	intel_ring_advance(ring);

	return ret;
}

void i915_gem_init_swizzling(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 5 ||
	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
				 DISP_TILE_SURFACE_SWIZZLING);

	if (IS_GEN5(dev))
		return;

	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
	if (IS_GEN6(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
	else if (IS_GEN7(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
	else if (IS_GEN8(dev))
		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
	else
		BUG();
}

static bool
intel_enable_blt(struct drm_device *dev)
{
	if (!HAS_BLT(dev))
		return false;

	/* The blitter was dysfunctional on early prototypes */
	if (IS_GEN6(dev) && dev->pdev->revision < 8) {
		DRM_INFO("BLT not supported on this pre-production hardware;"
			 " graphics performance will be degraded.\n");
		return false;
	}

	return true;
}

static void init_unused_ring(struct drm_device *dev, u32 base)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RING_CTL(base), 0);
	I915_WRITE(RING_HEAD(base), 0);
	I915_WRITE(RING_TAIL(base), 0);
	I915_WRITE(RING_START(base), 0);
}

static void init_unused_rings(struct drm_device *dev)
{
	if (IS_I830(dev)) {
		init_unused_ring(dev, PRB1_BASE);
		init_unused_ring(dev, SRB0_BASE);
		init_unused_ring(dev, SRB1_BASE);
		init_unused_ring(dev, SRB2_BASE);
		init_unused_ring(dev, SRB3_BASE);
	} else if (IS_GEN2(dev)) {
		init_unused_ring(dev, SRB0_BASE);
		init_unused_ring(dev, SRB1_BASE);
	} else if (IS_GEN3(dev)) {
		init_unused_ring(dev, PRB1_BASE);
		init_unused_ring(dev, PRB2_BASE);
	}
}

int i915_gem_init_rings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_init_render_ring_buffer(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = intel_init_bsd_ring_buffer(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	if (intel_enable_blt(dev)) {
		ret = intel_init_blt_ring_buffer(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	if (HAS_VEBOX(dev)) {
		ret = intel_init_vebox_ring_buffer(dev);
		if (ret)
			goto cleanup_blt_ring;
	}

	if (HAS_BSD2(dev)) {
		ret = intel_init_bsd2_ring_buffer(dev);
		if (ret)
			goto cleanup_vebox_ring;
	}

	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
	if (ret)
		goto cleanup_bsd2_ring;

	return 0;

cleanup_bsd2_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
cleanup_vebox_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
cleanup_blt_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
cleanup_bsd_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
cleanup_render_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);

	return ret;
}
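
/*
 * Bring the GPU back up: program swizzling and workaround registers,
 * quiesce the unused legacy rings, enable PPGTT, then run basic and
 * context/PPGTT initialisation on every ring under a forcewake blanket.
 */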
  3801. int
  3802. i915_gem_init_hw(struct drm_device *dev)
  3803. {
  3804. struct drm_i915_private *dev_priv = dev->dev_private;
  3805. struct intel_engine_cs *ring;
  3806. int ret, i, j;
  3807. if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
  3808. return -EIO;
  3809. /* Double layer security blanket, see i915_gem_init() */
  3810. intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
  3811. if (dev_priv->ellc_size)
  3812. I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
  3813. if (IS_HASWELL(dev))
  3814. I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
  3815. LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
  3816. if (HAS_PCH_NOP(dev)) {
  3817. if (IS_IVYBRIDGE(dev)) {
  3818. u32 temp = I915_READ(GEN7_MSG_CTL);
  3819. temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
  3820. I915_WRITE(GEN7_MSG_CTL, temp);
  3821. } else if (INTEL_INFO(dev)->gen >= 7) {
  3822. u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
  3823. temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
  3824. I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
  3825. }
  3826. }
  3827. i915_gem_init_swizzling(dev);
  3828. /*
  3829. * At least 830 can leave some of the unused rings
  3830. * "active" (ie. head != tail) after resume which
  3831. * will prevent c3 entry. Makes sure all unused rings
  3832. * are totally idle.
  3833. */
  3834. init_unused_rings(dev);
  3835. BUG_ON(!dev_priv->ring[RCS].default_context);
  3836. ret = i915_ppgtt_init_hw(dev);
  3837. if (ret) {
  3838. DRM_ERROR("PPGTT enable HW failed %d\n", ret);
  3839. goto out;
  3840. }
  3841. /* Need to do basic initialisation of all rings first: */
  3842. for_each_ring(ring, dev_priv, i) {
  3843. ret = ring->init_hw(ring);
  3844. if (ret)
  3845. goto out;
  3846. }
  3847. /* Now it is safe to go back round and do everything else: */
  3848. for_each_ring(ring, dev_priv, i) {
  3849. struct drm_i915_gem_request *req;
  3850. WARN_ON(!ring->default_context);
  3851. ret = i915_gem_request_alloc(ring, ring->default_context, &req);
  3852. if (ret) {
  3853. i915_gem_cleanup_ringbuffer(dev);
  3854. goto out;
  3855. }
  3856. if (ring->id == RCS) {
  3857. for (j = 0; j < NUM_L3_SLICES(dev); j++)
  3858. i915_gem_l3_remap(req, j);
  3859. }
  3860. ret = i915_ppgtt_init_ring(req);
  3861. if (ret && ret != -EIO) {
  3862. DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret);
  3863. i915_gem_request_cancel(req);
  3864. i915_gem_cleanup_ringbuffer(dev);
  3865. goto out;
  3866. }
  3867. ret = i915_gem_context_enable(req);
  3868. if (ret && ret != -EIO) {
  3869. DRM_ERROR("Context enable ring #%d failed %d\n", i, ret);
  3870. i915_gem_request_cancel(req);
  3871. i915_gem_cleanup_ringbuffer(dev);
  3872. goto out;
  3873. }
  3874. i915_add_request_no_flush(req);
  3875. }
  3876. out:
  3877. intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
  3878. return ret;
  3879. }
  3880. int i915_gem_init(struct drm_device *dev)
  3881. {
  3882. struct drm_i915_private *dev_priv = dev->dev_private;
  3883. int ret;
  3884. i915.enable_execlists = intel_sanitize_enable_execlists(dev,
  3885. i915.enable_execlists);
  3886. mutex_lock(&dev->struct_mutex);
  3887. if (IS_VALLEYVIEW(dev)) {
  3888. /* VLVA0 (potential hack), BIOS isn't actually waking us */
  3889. I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
  3890. if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
  3891. VLV_GTLC_ALLOWWAKEACK), 10))
  3892. DRM_DEBUG_DRIVER("allow wake ack timed out\n");
  3893. }
  3894. if (!i915.enable_execlists) {
  3895. dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
  3896. dev_priv->gt.init_rings = i915_gem_init_rings;
  3897. dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
  3898. dev_priv->gt.stop_ring = intel_stop_ring_buffer;
  3899. } else {
  3900. dev_priv->gt.execbuf_submit = intel_execlists_submission;
  3901. dev_priv->gt.init_rings = intel_logical_rings_init;
  3902. dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
  3903. dev_priv->gt.stop_ring = intel_logical_ring_stop;
  3904. }
  3905. /* This is just a security blanket to placate dragons.
  3906. * On some systems, we very sporadically observe that the first TLBs
  3907. * used by the CS may be stale, despite us poking the TLB reset. If
  3908. * we hold the forcewake during initialisation these problems
  3909. * just magically go away.
  3910. */
  3911. intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
  3912. ret = i915_gem_init_userptr(dev);
  3913. if (ret)
  3914. goto out_unlock;
  3915. i915_gem_init_global_gtt(dev);
  3916. ret = i915_gem_context_init(dev);
  3917. if (ret)
  3918. goto out_unlock;
  3919. ret = dev_priv->gt.init_rings(dev);
  3920. if (ret)
  3921. goto out_unlock;
  3922. ret = i915_gem_init_hw(dev);
  3923. if (ret == -EIO) {
  3924. /* Allow ring initialisation to fail by marking the GPU as
  3925. * wedged. But we only want to do this where the GPU is angry,
  3926. * for all other failure, such as an allocation failure, bail.
  3927. */
  3928. DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
  3929. atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
  3930. ret = 0;
  3931. }
  3932. out_unlock:
  3933. intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
  3934. mutex_unlock(&dev->struct_mutex);
  3935. return ret;
  3936. }
  3937. void
  3938. i915_gem_cleanup_ringbuffer(struct drm_device *dev)
  3939. {
  3940. struct drm_i915_private *dev_priv = dev->dev_private;
  3941. struct intel_engine_cs *ring;
  3942. int i;
  3943. for_each_ring(ring, dev_priv, i)
  3944. dev_priv->gt.cleanup_ring(ring);
  3945. if (i915.enable_execlists)
  3946. /*
  3947. * Neither the BIOS, ourselves or any other kernel
  3948. * expects the system to be in execlists mode on startup,
  3949. * so we need to reset the GPU back to legacy mode.
  3950. */
  3951. intel_gpu_reset(dev);
  3952. }
  3953. static void
  3954. init_ring_lists(struct intel_engine_cs *ring)
  3955. {
  3956. INIT_LIST_HEAD(&ring->active_list);
  3957. INIT_LIST_HEAD(&ring->request_list);
  3958. }
  3959. void i915_init_vm(struct drm_i915_private *dev_priv,
  3960. struct i915_address_space *vm)
  3961. {
  3962. if (!i915_is_ggtt(vm))
  3963. drm_mm_init(&vm->mm, vm->start, vm->total);
  3964. vm->dev = dev_priv->dev;
  3965. INIT_LIST_HEAD(&vm->active_list);
  3966. INIT_LIST_HEAD(&vm->inactive_list);
  3967. INIT_LIST_HEAD(&vm->global_link);
  3968. list_add_tail(&vm->global_link, &dev_priv->vm_list);
  3969. }
  3970. void
  3971. i915_gem_load(struct drm_device *dev)
  3972. {
  3973. struct drm_i915_private *dev_priv = dev->dev_private;
  3974. int i;
  3975. dev_priv->objects =
  3976. kmem_cache_create("i915_gem_object",
  3977. sizeof(struct drm_i915_gem_object), 0,
  3978. SLAB_HWCACHE_ALIGN,
  3979. NULL);
  3980. dev_priv->vmas =
  3981. kmem_cache_create("i915_gem_vma",
  3982. sizeof(struct i915_vma), 0,
  3983. SLAB_HWCACHE_ALIGN,
  3984. NULL);
  3985. dev_priv->requests =
  3986. kmem_cache_create("i915_gem_request",
  3987. sizeof(struct drm_i915_gem_request), 0,
  3988. SLAB_HWCACHE_ALIGN,
  3989. NULL);
  3990. INIT_LIST_HEAD(&dev_priv->vm_list);
  3991. i915_init_vm(dev_priv, &dev_priv->gtt.base);
  3992. INIT_LIST_HEAD(&dev_priv->context_list);
  3993. INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
  3994. INIT_LIST_HEAD(&dev_priv->mm.bound_list);
  3995. INIT_LIST_HEAD(&dev_priv->mm.fence_list);
  3996. for (i = 0; i < I915_NUM_RINGS; i++)
  3997. init_ring_lists(&dev_priv->ring[i]);
  3998. for (i = 0; i < I915_MAX_NUM_FENCES; i++)
  3999. INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
  4000. INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
  4001. i915_gem_retire_work_handler);
  4002. INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
  4003. i915_gem_idle_work_handler);
  4004. init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
  4005. dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
  4006. if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
  4007. dev_priv->num_fence_regs = 32;
  4008. else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
  4009. dev_priv->num_fence_regs = 16;
  4010. else
  4011. dev_priv->num_fence_regs = 8;
  4012. if (intel_vgpu_active(dev))
  4013. dev_priv->num_fence_regs =
  4014. I915_READ(vgtif_reg(avail_rs.fence_num));
  4015. /* Initialize fence registers to zero */
  4016. INIT_LIST_HEAD(&dev_priv->mm.fence_list);
  4017. i915_gem_restore_fences(dev);
  4018. i915_gem_detect_bit_6_swizzle(dev);
  4019. init_waitqueue_head(&dev_priv->pending_flip_queue);
  4020. dev_priv->mm.interruptible = true;
  4021. i915_gem_shrinker_init(dev_priv);
  4022. mutex_init(&dev_priv->fb_tracking.lock);
  4023. }
  4024. void i915_gem_release(struct drm_device *dev, struct drm_file *file)
  4025. {
  4026. struct drm_i915_file_private *file_priv = file->driver_priv;
  4027. /* Clean up our request list when the client is going away, so that
  4028. * later retire_requests won't dereference our soon-to-be-gone
  4029. * file_priv.
  4030. */
  4031. spin_lock(&file_priv->mm.lock);
  4032. while (!list_empty(&file_priv->mm.request_list)) {
  4033. struct drm_i915_gem_request *request;
  4034. request = list_first_entry(&file_priv->mm.request_list,
  4035. struct drm_i915_gem_request,
  4036. client_list);
  4037. list_del(&request->client_list);
  4038. request->file_priv = NULL;
  4039. }
  4040. spin_unlock(&file_priv->mm.lock);
  4041. if (!list_empty(&file_priv->rps.link)) {
  4042. spin_lock(&to_i915(dev)->rps.client_lock);
  4043. list_del(&file_priv->rps.link);
  4044. spin_unlock(&to_i915(dev)->rps.client_lock);
  4045. }
  4046. }

int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG_DRIVER("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = dev->dev_private;
	file_priv->file = file;
	INIT_LIST_HEAD(&file_priv->rps.link);

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	ret = i915_gem_context_open(dev, file);
	if (ret)
		kfree(file_priv);

	return ret;
}
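
/*
 * i915_gem_open() is the per-client constructor, called from the
 * driver's DRM open hook with a freshly allocated struct drm_file.
 * On context_open failure the kzalloc'ed file_priv is freed here and
 * the nonzero return aborts the open, so the stale file->driver_priv
 * pointer is not expected to be dereferenced afterwards.
 */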

/**
 * i915_gem_track_fb - update frontbuffer tracking
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 * from @old and setting them in @new. Both @old and @new can be NULL.
 */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
	if (old) {
		WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
		WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
		old->frontbuffer_bits &= ~frontbuffer_bits;
	}

	if (new) {
		WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
		WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
		new->frontbuffer_bits |= frontbuffer_bits;
	}
}
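
/*
 * An illustrative call site (a sketch, not a verbatim caller): when a
 * flip moves the primary plane to a new buffer, tracking is handed over
 * from the outgoing to the incoming object under struct_mutex:
 *
 *	i915_gem_track_fb(old_obj, new_obj,
 *			  INTEL_FRONTBUFFER_PRIMARY(pipe));
 *
 * with INTEL_FRONTBUFFER_PRIMARY() being the per-pipe slot macro; the
 * WARN_ONs above enforce the locking and bit-ownership rules.
 */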

/* All the new VM stuff */
unsigned long
i915_gem_obj_offset(struct drm_i915_gem_object *o,
		    struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);

	list_for_each_entry(vma, &o->vma_list, vma_link) {
		if (i915_is_ggtt(vma->vm) &&
		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
			continue;
		if (vma->vm == vm)
			return vma->node.start;
	}

	WARN(1, "%s vma for this object not found.\n",
	     i915_is_ggtt(vm) ? "global" : "ppgtt");
	return -1;
}
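
/*
 * Most callers want the GGTT offset of the normal view; assuming the
 * usual i915_drv.h wrapper that passes &i915_ggtt_view_normal, that is
 * simply:
 *
 *	unsigned long offset = i915_gem_obj_ggtt_offset(obj);
 *
 * Offsets of alternate GGTT views go through
 * i915_gem_obj_ggtt_offset_view() below.
 */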

unsigned long
i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
			      const struct i915_ggtt_view *view)
{
	struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (vma->vm == ggtt &&
		    i915_ggtt_view_equal(&vma->ggtt_view, view))
			return vma->node.start;

	WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
	return -1;
}

bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, vma_link) {
		if (i915_is_ggtt(vma->vm) &&
		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
			continue;
		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
			return true;
	}

	return false;
}

bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
				  const struct i915_ggtt_view *view)
{
	struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (vma->vm == ggtt &&
		    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
		    drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}
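
/*
 * The _view variants treat each GGTT view as a distinct binding: an
 * object bound only through, say, a rotated view will make
 * i915_gem_obj_ggtt_bound_view(o, &i915_ggtt_view_normal) return
 * false. i915_ggtt_view_equal() compares the view type and, where
 * applicable, the view parameters.
 */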

bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}

unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
				struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);

	BUG_ON(list_empty(&o->vma_list));

	list_for_each_entry(vma, &o->vma_list, vma_link) {
		if (i915_is_ggtt(vma->vm) &&
		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
			continue;
		if (vma->vm == vm)
			return vma->node.size;
	}
	return 0;
}
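
/*
 * The value returned above is the size of the vma's drm_mm node, which
 * may exceed obj->base.size if the binding was padded to satisfy fence
 * constraints (e.g. tiled, mappable GGTT bindings on older gens); a
 * return of 0 means the object is not bound into @vm via a normal view.
 */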

bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, vma_link)
		if (vma->pin_count > 0)
			return true;

	return false;
}
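
/*
 * An object counts as pinned if any of its vmas holds a nonzero
 * pin_count; no distinction is made here between GGTT and ppgtt pins.
 */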

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_device *dev,
				 const void *data, size_t size)
{
	struct drm_i915_gem_object *obj;
	struct sg_table *sg;
	size_t bytes;
	int ret;

	obj = i915_gem_alloc_object(dev, round_up(size, PAGE_SIZE));
	if (IS_ERR_OR_NULL(obj))
		return obj;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		goto fail;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto fail;

	i915_gem_object_pin_pages(obj);
	sg = obj->pages;
	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
	i915_gem_object_unpin_pages(obj);

	if (WARN_ON(bytes != size)) {
		DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size);
		ret = -EFAULT;
		goto fail;
	}

	return obj;

fail:
	drm_gem_object_unreference(&obj->base);
	return ERR_PTR(ret);
}
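
/*
 * A sketch of the intended calling convention (not a verbatim caller
 * from the tree), e.g. wrapping a firmware blob obtained via
 * request_firmware() in a GEM object:
 *
 *	obj = i915_gem_object_create_from_data(dev, fw->data, fw->size);
 *	if (IS_ERR_OR_NULL(obj))
 *		return obj ? PTR_ERR(obj) : -ENOMEM;
 *
 * The IS_ERR_OR_NULL() check matches the mixed error convention above:
 * i915_gem_alloc_object() can return NULL while later failures are
 * reported as ERR_PTR().
 */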