- /*
- * Copyright © 2010 Daniel Vetter
- * Copyright © 2011-2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
- #include <linux/slab.h> /* fault-inject.h is not standalone! */
- #include <linux/fault-inject.h>
- #include <linux/log2.h>
- #include <linux/random.h>
- #include <linux/seq_file.h>
- #include <linux/stop_machine.h>
- #include <asm/set_memory.h>
- #include <drm/drmP.h>
- #include <drm/i915_drm.h>
- #include "i915_drv.h"
- #include "i915_vgpu.h"
- #include "i915_trace.h"
- #include "intel_drv.h"
- #include "intel_frontbuffer.h"
- #define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)
- /**
- * DOC: Global GTT views
- *
- * Background and previous state
- *
- * Historically objects could exist (be bound) in global GTT space only as
- * singular instances, with a view representing all of the object's backing
- * pages in a linear fashion. This view is called the normal view.
- *
- * To support multiple views of the same object, where the number of mapped
- * pages is not equal to the backing store, or where the layout of the pages
- * is not linear, the concept of a GGTT view was added.
- *
- * One example of an alternative view is a stereo display driven by a single
- * image. In this case we would have a framebuffer looking like this
- * (2x2 pages):
- *
- * 12
- * 34
- *
- * The above represents the normal GGTT view, as normally mapped for GPU or
- * CPU rendering. In contrast, the display engine would be fed an alternative
- * view, which could look something like this:
- *
- * 1212
- * 3434
- *
- * In this example both the size and layout of pages in the alternative view
- * are different from the normal view.
- *
- * Implementation and usage
- *
- * GGTT views are implemented using VMAs and are distinguished via enum
- * i915_ggtt_view_type and struct i915_ggtt_view.
- *
- * A new flavour of core GEM functions which work with GGTT bound objects was
- * added, using the _ggtt_ infix and sometimes the _view postfix, to avoid
- * renaming large amounts of code. They take the struct i915_ggtt_view
- * parameter encapsulating all metadata required to implement a view.
- *
- * As a helper for callers which are only interested in the normal view, a
- * globally const i915_ggtt_view_normal singleton instance exists. All old
- * core GEM API functions, i.e. the ones not taking a view parameter, operate
- * on the normal GGTT view.
- *
- * Code wanting to add or use a new GGTT view needs to:
- *
- * 1. Add a new enum with a suitable name.
- * 2. Extend the metadata in the i915_ggtt_view structure if required.
- * 3. Add support to i915_get_vma_pages().
- *
- * New views are required to build a scatter-gather table from within the
- * i915_get_vma_pages function. This table is stored in vma.ggtt_view and
- * exists for the lifetime of a VMA.
- *
- * The core API is designed to have copy semantics, which means that a passed
- * in struct i915_ggtt_view does not need to be persistent (left around after
- * calling the core API functions).
- *
- */
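- /*
- * A minimal usage sketch (illustrative only, not code from this file): a
- * caller wanting a non-normal view fills in a struct i915_ggtt_view on the
- * stack (copy semantics, see above) and passes it to the pinning API. The
- * rotation parameters below are made-up example values.
- *
- * struct i915_ggtt_view view = {
- * .type = I915_GGTT_VIEW_ROTATED,
- * .rotated.plane[0] = { .width = 2, .height = 2, .stride = 2 },
- * };
- * struct i915_vma *vma;
- *
- * vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
- * if (IS_ERR(vma))
- * return PTR_ERR(vma);
- *
- * Pinning the same object again with a NULL (normal) view would yield a
- * second, distinct VMA.
- */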
- static int
- i915_get_ggtt_vma_pages(struct i915_vma *vma);
- static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
- {
- /* Note that as an uncached mmio write, this should flush the
- * WCB (write-combine buffer) of the writes into the GGTT before it
- * triggers the invalidate.
- */
- I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
- }
- static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv)
- {
- gen6_ggtt_invalidate(dev_priv);
- I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
- }
- static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv)
- {
- intel_gtt_chipset_flush();
- }
- static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
- {
- i915->ggtt.invalidate(i915);
- }
- int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
- int enable_ppgtt)
- {
- bool has_aliasing_ppgtt;
- bool has_full_ppgtt;
- bool has_full_48bit_ppgtt;
- has_aliasing_ppgtt = dev_priv->info.has_aliasing_ppgtt;
- has_full_ppgtt = dev_priv->info.has_full_ppgtt;
- has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;
- if (intel_vgpu_active(dev_priv)) {
- /* GVT-g has no support for 32bit ppgtt */
- has_full_ppgtt = false;
- has_full_48bit_ppgtt = intel_vgpu_has_full_48bit_ppgtt(dev_priv);
- }
- if (!has_aliasing_ppgtt)
- return 0;
- /*
- * We don't allow disabling PPGTT for gen9+ as it's a requirement for
- * execlists, the sole mechanism available to submit work.
- */
- if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
- return 0;
- if (enable_ppgtt == 1)
- return 1;
- if (enable_ppgtt == 2 && has_full_ppgtt)
- return 2;
- if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
- return 3;
- /* Disable ppgtt on SNB if VT-d is on. */
- if (IS_GEN6(dev_priv) && intel_vtd_active()) {
- DRM_INFO("Disabling PPGTT because VT-d is on\n");
- return 0;
- }
- /* Early (pre-B3 stepping) VLV doesn't have PPGTT */
- if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
- DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
- return 0;
- }
- if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists) {
- if (has_full_48bit_ppgtt)
- return 3;
- if (has_full_ppgtt)
- return 2;
- }
- return has_aliasing_ppgtt ? 1 : 0;
- }
- static int ppgtt_bind_vma(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 unused)
- {
- u32 pte_flags;
- int ret;
- if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
- ret = vma->vm->allocate_va_range(vma->vm, vma->node.start,
- vma->size);
- if (ret)
- return ret;
- }
- vma->pages = vma->obj->mm.pages;
- /* Currently applicable only to VLV */
- pte_flags = 0;
- if (vma->obj->gt_ro)
- pte_flags |= PTE_READ_ONLY;
- vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
- return 0;
- }
- static void ppgtt_unbind_vma(struct i915_vma *vma)
- {
- vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
- }
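- /*
- * Encode a gen8 PTE: the page's dma address plus present/RW flags and a
- * PPAT index selecting the cacheability attributes for the given level.
- */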
- static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
- enum i915_cache_level level)
- {
- gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
- pte |= addr;
- switch (level) {
- case I915_CACHE_NONE:
- pte |= PPAT_UNCACHED_INDEX;
- break;
- case I915_CACHE_WT:
- pte |= PPAT_DISPLAY_ELLC_INDEX;
- break;
- default:
- pte |= PPAT_CACHED_INDEX;
- break;
- }
- return pte;
- }
- static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
- const enum i915_cache_level level)
- {
- gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
- pde |= addr;
- if (level != I915_CACHE_NONE)
- pde |= PPAT_CACHED_PDE_INDEX;
- else
- pde |= PPAT_UNCACHED_INDEX;
- return pde;
- }
- #define gen8_pdpe_encode gen8_pde_encode
- #define gen8_pml4e_encode gen8_pde_encode
- static gen6_pte_t snb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 unused)
- {
- gen6_pte_t pte = GEN6_PTE_VALID;
- pte |= GEN6_PTE_ADDR_ENCODE(addr);
- switch (level) {
- case I915_CACHE_L3_LLC:
- case I915_CACHE_LLC:
- pte |= GEN6_PTE_CACHE_LLC;
- break;
- case I915_CACHE_NONE:
- pte |= GEN6_PTE_UNCACHED;
- break;
- default:
- MISSING_CASE(level);
- }
- return pte;
- }
- static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 unused)
- {
- gen6_pte_t pte = GEN6_PTE_VALID;
- pte |= GEN6_PTE_ADDR_ENCODE(addr);
- switch (level) {
- case I915_CACHE_L3_LLC:
- pte |= GEN7_PTE_CACHE_L3_LLC;
- break;
- case I915_CACHE_LLC:
- pte |= GEN6_PTE_CACHE_LLC;
- break;
- case I915_CACHE_NONE:
- pte |= GEN6_PTE_UNCACHED;
- break;
- default:
- MISSING_CASE(level);
- }
- return pte;
- }
- static gen6_pte_t byt_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
- {
- gen6_pte_t pte = GEN6_PTE_VALID;
- pte |= GEN6_PTE_ADDR_ENCODE(addr);
- if (!(flags & PTE_READ_ONLY))
- pte |= BYT_PTE_WRITEABLE;
- if (level != I915_CACHE_NONE)
- pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
- return pte;
- }
- static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 unused)
- {
- gen6_pte_t pte = GEN6_PTE_VALID;
- pte |= HSW_PTE_ADDR_ENCODE(addr);
- if (level != I915_CACHE_NONE)
- pte |= HSW_WB_LLC_AGE3;
- return pte;
- }
- static gen6_pte_t iris_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 unused)
- {
- gen6_pte_t pte = GEN6_PTE_VALID;
- pte |= HSW_PTE_ADDR_ENCODE(addr);
- switch (level) {
- case I915_CACHE_NONE:
- break;
- case I915_CACHE_WT:
- pte |= HSW_WT_ELLC_LLC_AGE3;
- break;
- default:
- pte |= HSW_WB_ELLC_LLC_AGE3;
- break;
- }
- return pte;
- }
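- /*
- * The pte_encode variants above (snb, ivb, byt, hsw, iris) differ only in
- * which cacheability bits their platform understands; the matching callback
- * is installed as vm->pte_encode when the GGTT is probed, and inherited by
- * the ppgtt (see gen6_ppgtt_init below).
- */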
- static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
- {
- struct page *page;
- if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
- i915_gem_shrink_all(vm->i915);
- if (vm->free_pages.nr)
- return vm->free_pages.pages[--vm->free_pages.nr];
- page = alloc_page(gfp);
- if (!page)
- return NULL;
- if (vm->pt_kmap_wc)
- set_pages_array_wc(&page, 1);
- return page;
- }
- static void vm_free_pages_release(struct i915_address_space *vm)
- {
- GEM_BUG_ON(!pagevec_count(&vm->free_pages));
- if (vm->pt_kmap_wc)
- set_pages_array_wb(vm->free_pages.pages,
- pagevec_count(&vm->free_pages));
- __pagevec_release(&vm->free_pages);
- }
- static void vm_free_page(struct i915_address_space *vm, struct page *page)
- {
- if (!pagevec_add(&vm->free_pages, page))
- vm_free_pages_release(vm);
- }
- static int __setup_page_dma(struct i915_address_space *vm,
- struct i915_page_dma *p,
- gfp_t gfp)
- {
- p->page = vm_alloc_page(vm, gfp | __GFP_NOWARN | __GFP_NORETRY);
- if (unlikely(!p->page))
- return -ENOMEM;
- p->daddr = dma_map_page(vm->dma, p->page, 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
- vm_free_page(vm, p->page);
- return -ENOMEM;
- }
- return 0;
- }
- static int setup_page_dma(struct i915_address_space *vm,
- struct i915_page_dma *p)
- {
- return __setup_page_dma(vm, p, I915_GFP_DMA);
- }
- static void cleanup_page_dma(struct i915_address_space *vm,
- struct i915_page_dma *p)
- {
- dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- vm_free_page(vm, p->page);
- }
- #define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)
- #define setup_px(vm, px) setup_page_dma((vm), px_base(px))
- #define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
- #define fill_px(vm, px, v) fill_page_dma((vm), px_base(px), (v))
- #define fill32_px(vm, px, v) fill_page_dma_32((vm), px_base(px), (v))
- static void fill_page_dma(struct i915_address_space *vm,
- struct i915_page_dma *p,
- const u64 val)
- {
- u64 * const vaddr = kmap_atomic(p->page);
- int i;
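- /* a 4K page holds 512 qwords (PAGE_SIZE / sizeof(u64)) */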
- for (i = 0; i < 512; i++)
- vaddr[i] = val;
- kunmap_atomic(vaddr);
- }
- static void fill_page_dma_32(struct i915_address_space *vm,
- struct i915_page_dma *p,
- const u32 v)
- {
- fill_page_dma(vm, p, (u64)v << 32 | v);
- }
- static int
- setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
- {
- return __setup_page_dma(vm, &vm->scratch_page, gfp | __GFP_ZERO);
- }
- static void cleanup_scratch_page(struct i915_address_space *vm)
- {
- cleanup_page_dma(vm, &vm->scratch_page);
- }
- static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
- {
- struct i915_page_table *pt;
- pt = kmalloc(sizeof(*pt), GFP_KERNEL | __GFP_NOWARN);
- if (unlikely(!pt))
- return ERR_PTR(-ENOMEM);
- if (unlikely(setup_px(vm, pt))) {
- kfree(pt);
- return ERR_PTR(-ENOMEM);
- }
- pt->used_ptes = 0;
- return pt;
- }
- static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
- {
- cleanup_px(vm, pt);
- kfree(pt);
- }
- static void gen8_initialize_pt(struct i915_address_space *vm,
- struct i915_page_table *pt)
- {
- fill_px(vm, pt,
- gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC));
- }
- static void gen6_initialize_pt(struct i915_address_space *vm,
- struct i915_page_table *pt)
- {
- fill32_px(vm, pt,
- vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
- }
- static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
- {
- struct i915_page_directory *pd;
- pd = kzalloc(sizeof(*pd), GFP_KERNEL | __GFP_NOWARN);
- if (unlikely(!pd))
- return ERR_PTR(-ENOMEM);
- if (unlikely(setup_px(vm, pd))) {
- kfree(pd);
- return ERR_PTR(-ENOMEM);
- }
- pd->used_pdes = 0;
- return pd;
- }
- static void free_pd(struct i915_address_space *vm,
- struct i915_page_directory *pd)
- {
- cleanup_px(vm, pd);
- kfree(pd);
- }
- static void gen8_initialize_pd(struct i915_address_space *vm,
- struct i915_page_directory *pd)
- {
- unsigned int i;
- fill_px(vm, pd,
- gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC));
- for (i = 0; i < I915_PDES; i++)
- pd->page_table[i] = vm->scratch_pt;
- }
- static int __pdp_init(struct i915_address_space *vm,
- struct i915_page_directory_pointer *pdp)
- {
- const unsigned int pdpes = i915_pdpes_per_pdp(vm);
- unsigned int i;
- pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
- GFP_KERNEL | __GFP_NOWARN);
- if (unlikely(!pdp->page_directory))
- return -ENOMEM;
- for (i = 0; i < pdpes; i++)
- pdp->page_directory[i] = vm->scratch_pd;
- return 0;
- }
- static void __pdp_fini(struct i915_page_directory_pointer *pdp)
- {
- kfree(pdp->page_directory);
- pdp->page_directory = NULL;
- }
- static inline bool use_4lvl(const struct i915_address_space *vm)
- {
- return i915_vm_is_48bit(vm);
- }
- static struct i915_page_directory_pointer *
- alloc_pdp(struct i915_address_space *vm)
- {
- struct i915_page_directory_pointer *pdp;
- int ret = -ENOMEM;
- WARN_ON(!use_4lvl(vm));
- pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
- if (!pdp)
- return ERR_PTR(-ENOMEM);
- ret = __pdp_init(vm, pdp);
- if (ret)
- goto fail_bitmap;
- ret = setup_px(vm, pdp);
- if (ret)
- goto fail_page_m;
- return pdp;
- fail_page_m:
- __pdp_fini(pdp);
- fail_bitmap:
- kfree(pdp);
- return ERR_PTR(ret);
- }
- static void free_pdp(struct i915_address_space *vm,
- struct i915_page_directory_pointer *pdp)
- {
- __pdp_fini(pdp);
- if (!use_4lvl(vm))
- return;
- cleanup_px(vm, pdp);
- kfree(pdp);
- }
- static void gen8_initialize_pdp(struct i915_address_space *vm,
- struct i915_page_directory_pointer *pdp)
- {
- gen8_ppgtt_pdpe_t scratch_pdpe;
- scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
- fill_px(vm, pdp, scratch_pdpe);
- }
- static void gen8_initialize_pml4(struct i915_address_space *vm,
- struct i915_pml4 *pml4)
- {
- unsigned int i;
- fill_px(vm, pml4,
- gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC));
- for (i = 0; i < GEN8_PML4ES_PER_PML4; i++)
- pml4->pdps[i] = vm->scratch_pdp;
- }
- /* Broadwell Page Directory Pointer Descriptors */
- static int gen8_write_pdp(struct drm_i915_gem_request *req,
- unsigned entry,
- dma_addr_t addr)
- {
- struct intel_engine_cs *engine = req->engine;
- u32 *cs;
- BUG_ON(entry >= 4);
- cs = intel_ring_begin(req, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
- *cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry));
- *cs++ = upper_32_bits(addr);
- *cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
- *cs++ = lower_32_bits(addr);
- intel_ring_advance(req, cs);
- return 0;
- }
- static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_request *req)
- {
- int i, ret;
- for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
- const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
- ret = gen8_write_pdp(req, i, pd_daddr);
- if (ret)
- return ret;
- }
- return 0;
- }
- static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_request *req)
- {
- return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
- }
- /* PDE TLBs are a pain to invalidate on GEN8+. When we modify
- * the page table structures, we mark them dirty so that
- * context switching/execlist queuing code takes extra steps
- * to ensure that TLBs are flushed.
- */
- static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
- {
- ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask;
- }
- /* Removes entries from a single page table, releasing it if it's empty.
- * Caller can use the return value to update higher-level entries.
- */
- static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
- struct i915_page_table *pt,
- u64 start, u64 length)
- {
- unsigned int num_entries = gen8_pte_count(start, length);
- unsigned int pte = gen8_pte_index(start);
- unsigned int pte_end = pte + num_entries;
- const gen8_pte_t scratch_pte =
- gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
- gen8_pte_t *vaddr;
- GEM_BUG_ON(num_entries > pt->used_ptes);
- pt->used_ptes -= num_entries;
- if (!pt->used_ptes)
- return true;
- vaddr = kmap_atomic_px(pt);
- while (pte < pte_end)
- vaddr[pte++] = scratch_pte;
- kunmap_atomic(vaddr);
- return false;
- }
- static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
- struct i915_page_directory *pd,
- struct i915_page_table *pt,
- unsigned int pde)
- {
- gen8_pde_t *vaddr;
- pd->page_table[pde] = pt;
- vaddr = kmap_atomic_px(pd);
- vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
- kunmap_atomic(vaddr);
- }
- static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
- struct i915_page_directory *pd,
- u64 start, u64 length)
- {
- struct i915_page_table *pt;
- u32 pde;
- gen8_for_each_pde(pt, pd, start, length, pde) {
- GEM_BUG_ON(pt == vm->scratch_pt);
- if (!gen8_ppgtt_clear_pt(vm, pt, start, length))
- continue;
- gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
- GEM_BUG_ON(!pd->used_pdes);
- pd->used_pdes--;
- free_pt(vm, pt);
- }
- return !pd->used_pdes;
- }
- static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
- struct i915_page_directory_pointer *pdp,
- struct i915_page_directory *pd,
- unsigned int pdpe)
- {
- gen8_ppgtt_pdpe_t *vaddr;
- pdp->page_directory[pdpe] = pd;
- if (!use_4lvl(vm))
- return;
- vaddr = kmap_atomic_px(pdp);
- vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
- kunmap_atomic(vaddr);
- }
- /* Removes entries from a single page directory pointer, releasing it if
- * it's empty. Caller can use the return value to update higher-level
- * entries.
- */
- static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
- struct i915_page_directory_pointer *pdp,
- u64 start, u64 length)
- {
- struct i915_page_directory *pd;
- unsigned int pdpe;
- gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
- GEM_BUG_ON(pd == vm->scratch_pd);
- if (!gen8_ppgtt_clear_pd(vm, pd, start, length))
- continue;
- gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
- GEM_BUG_ON(!pdp->used_pdpes);
- pdp->used_pdpes--;
- free_pd(vm, pd);
- }
- return !pdp->used_pdpes;
- }
- static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
- u64 start, u64 length)
- {
- gen8_ppgtt_clear_pdp(vm, &i915_vm_to_ppgtt(vm)->pdp, start, length);
- }
- static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4,
- struct i915_page_directory_pointer *pdp,
- unsigned int pml4e)
- {
- gen8_ppgtt_pml4e_t *vaddr;
- pml4->pdps[pml4e] = pdp;
- vaddr = kmap_atomic_px(pml4);
- vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
- kunmap_atomic(vaddr);
- }
- /* Removes entries from a single pml4.
- * This is the top-level structure in 4-level page tables used on gen8+.
- * Empty entries are always scratch pml4e.
- */
- static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
- u64 start, u64 length)
- {
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct i915_pml4 *pml4 = &ppgtt->pml4;
- struct i915_page_directory_pointer *pdp;
- unsigned int pml4e;
- GEM_BUG_ON(!use_4lvl(vm));
- gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
- GEM_BUG_ON(pdp == vm->scratch_pdp);
- if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length))
- continue;
- gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
- free_pdp(vm, pdp);
- }
- }
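- /*
- * Iterator state for walking a vma's backing scatterlist one 4K page at a
- * time: @dma is the current dma address, @max the end of the current
- * scatterlist segment.
- */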
- static inline struct sgt_dma {
- struct scatterlist *sg;
- dma_addr_t dma, max;
- } sgt_dma(struct i915_vma *vma) {
- struct scatterlist *sg = vma->pages->sgl;
- dma_addr_t addr = sg_dma_address(sg);
- return (struct sgt_dma) { sg, addr, addr + sg->length };
- }
- struct gen8_insert_pte {
- u16 pml4e;
- u16 pdpe;
- u16 pde;
- u16 pte;
- };
- static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start)
- {
- return (struct gen8_insert_pte) {
- gen8_pml4e_index(start),
- gen8_pdpe_index(start),
- gen8_pde_index(start),
- gen8_pte_index(start),
- };
- }
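- /*
- * Worked example, assuming the usual gen8 9/9/9/9+12 bit split: GPU
- * address 0x8040201000 decomposes as pml4e = 1, pdpe = 1, pde = 1 and
- * pte = 1, since each index is a 9-bit field above the 12-bit page offset.
- */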
- static __always_inline bool
- gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
- struct i915_page_directory_pointer *pdp,
- struct sgt_dma *iter,
- struct gen8_insert_pte *idx,
- enum i915_cache_level cache_level)
- {
- struct i915_page_directory *pd;
- const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
- gen8_pte_t *vaddr;
- bool ret;
- GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
- pd = pdp->page_directory[idx->pdpe];
- vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
- do {
- vaddr[idx->pte] = pte_encode | iter->dma;
- iter->dma += PAGE_SIZE;
- if (iter->dma >= iter->max) {
- iter->sg = __sg_next(iter->sg);
- if (!iter->sg) {
- ret = false;
- break;
- }
- iter->dma = sg_dma_address(iter->sg);
- iter->max = iter->dma + iter->sg->length;
- }
- if (++idx->pte == GEN8_PTES) {
- idx->pte = 0;
- if (++idx->pde == I915_PDES) {
- idx->pde = 0;
- /* Limited by sg length for 3lvl */
- if (++idx->pdpe == GEN8_PML4ES_PER_PML4) {
- idx->pdpe = 0;
- ret = true;
- break;
- }
- GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
- pd = pdp->page_directory[idx->pdpe];
- }
- kunmap_atomic(vaddr);
- vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
- }
- } while (1);
- kunmap_atomic(vaddr);
- return ret;
- }
- static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 unused)
- {
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct sgt_dma iter = sgt_dma(vma);
- struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
- gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
- cache_level);
- }
- static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 unused)
- {
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct sgt_dma iter = sgt_dma(vma);
- struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
- struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
- while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++], &iter,
- &idx, cache_level))
- GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
- }
- static void gen8_free_page_tables(struct i915_address_space *vm,
- struct i915_page_directory *pd)
- {
- int i;
- if (!px_page(pd))
- return;
- for (i = 0; i < I915_PDES; i++) {
- if (pd->page_table[i] != vm->scratch_pt)
- free_pt(vm, pd->page_table[i]);
- }
- }
- static int gen8_init_scratch(struct i915_address_space *vm)
- {
- int ret;
- ret = setup_scratch_page(vm, I915_GFP_DMA);
- if (ret)
- return ret;
- vm->scratch_pt = alloc_pt(vm);
- if (IS_ERR(vm->scratch_pt)) {
- ret = PTR_ERR(vm->scratch_pt);
- goto free_scratch_page;
- }
- vm->scratch_pd = alloc_pd(vm);
- if (IS_ERR(vm->scratch_pd)) {
- ret = PTR_ERR(vm->scratch_pd);
- goto free_pt;
- }
- if (use_4lvl(vm)) {
- vm->scratch_pdp = alloc_pdp(vm);
- if (IS_ERR(vm->scratch_pdp)) {
- ret = PTR_ERR(vm->scratch_pdp);
- goto free_pd;
- }
- }
- gen8_initialize_pt(vm, vm->scratch_pt);
- gen8_initialize_pd(vm, vm->scratch_pd);
- if (use_4lvl(vm))
- gen8_initialize_pdp(vm, vm->scratch_pdp);
- return 0;
- free_pd:
- free_pd(vm, vm->scratch_pd);
- free_pt:
- free_pt(vm, vm->scratch_pt);
- free_scratch_page:
- cleanup_scratch_page(vm);
- return ret;
- }
- static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
- {
- struct i915_address_space *vm = &ppgtt->base;
- struct drm_i915_private *dev_priv = vm->i915;
- enum vgt_g2v_type msg;
- int i;
- if (use_4lvl(vm)) {
- const u64 daddr = px_dma(&ppgtt->pml4);
- I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
- I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
- msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
- VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
- } else {
- for (i = 0; i < GEN8_3LVL_PDPES; i++) {
- const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
- I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
- I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
- }
- msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
- VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
- }
- I915_WRITE(vgtif_reg(g2v_notify), msg);
- return 0;
- }
- static void gen8_free_scratch(struct i915_address_space *vm)
- {
- if (use_4lvl(vm))
- free_pdp(vm, vm->scratch_pdp);
- free_pd(vm, vm->scratch_pd);
- free_pt(vm, vm->scratch_pt);
- cleanup_scratch_page(vm);
- }
- static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
- struct i915_page_directory_pointer *pdp)
- {
- const unsigned int pdpes = i915_pdpes_per_pdp(vm);
- int i;
- for (i = 0; i < pdpes; i++) {
- if (pdp->page_directory[i] == vm->scratch_pd)
- continue;
- gen8_free_page_tables(vm, pdp->page_directory[i]);
- free_pd(vm, pdp->page_directory[i]);
- }
- free_pdp(vm, pdp);
- }
- static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
- {
- int i;
- for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
- if (ppgtt->pml4.pdps[i] == ppgtt->base.scratch_pdp)
- continue;
- gen8_ppgtt_cleanup_3lvl(&ppgtt->base, ppgtt->pml4.pdps[i]);
- }
- cleanup_px(&ppgtt->base, &ppgtt->pml4);
- }
- static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
- {
- struct drm_i915_private *dev_priv = vm->i915;
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- if (intel_vgpu_active(dev_priv))
- gen8_ppgtt_notify_vgt(ppgtt, false);
- if (use_4lvl(vm))
- gen8_ppgtt_cleanup_4lvl(ppgtt);
- else
- gen8_ppgtt_cleanup_3lvl(&ppgtt->base, &ppgtt->pdp);
- gen8_free_scratch(vm);
- }
- static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
- struct i915_page_directory *pd,
- u64 start, u64 length)
- {
- struct i915_page_table *pt;
- u64 from = start;
- unsigned int pde;
- gen8_for_each_pde(pt, pd, start, length, pde) {
- if (pt == vm->scratch_pt) {
- pt = alloc_pt(vm);
- if (IS_ERR(pt))
- goto unwind;
- gen8_initialize_pt(vm, pt);
- gen8_ppgtt_set_pde(vm, pd, pt, pde);
- pd->used_pdes++;
- GEM_BUG_ON(pd->used_pdes > I915_PDES);
- }
- pt->used_ptes += gen8_pte_count(start, length);
- }
- return 0;
- unwind:
- gen8_ppgtt_clear_pd(vm, pd, from, start - from);
- return -ENOMEM;
- }
- static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
- struct i915_page_directory_pointer *pdp,
- u64 start, u64 length)
- {
- struct i915_page_directory *pd;
- u64 from = start;
- unsigned int pdpe;
- int ret;
- gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
- if (pd == vm->scratch_pd) {
- pd = alloc_pd(vm);
- if (IS_ERR(pd))
- goto unwind;
- gen8_initialize_pd(vm, pd);
- gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
- pdp->used_pdpes++;
- GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));
- mark_tlbs_dirty(i915_vm_to_ppgtt(vm));
- }
- ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
- if (unlikely(ret))
- goto unwind_pd;
- }
- return 0;
- unwind_pd:
- if (!pd->used_pdes) {
- gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
- GEM_BUG_ON(!pdp->used_pdpes);
- pdp->used_pdpes--;
- free_pd(vm, pd);
- }
- unwind:
- gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
- return -ENOMEM;
- }
- static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
- u64 start, u64 length)
- {
- return gen8_ppgtt_alloc_pdp(vm,
- &i915_vm_to_ppgtt(vm)->pdp, start, length);
- }
- static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
- u64 start, u64 length)
- {
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct i915_pml4 *pml4 = &ppgtt->pml4;
- struct i915_page_directory_pointer *pdp;
- u64 from = start;
- u32 pml4e;
- int ret;
- gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
- if (pml4->pdps[pml4e] == vm->scratch_pdp) {
- pdp = alloc_pdp(vm);
- if (IS_ERR(pdp))
- goto unwind;
- gen8_initialize_pdp(vm, pdp);
- gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
- }
- ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length);
- if (unlikely(ret))
- goto unwind_pdp;
- }
- return 0;
- unwind_pdp:
- if (!pdp->used_pdpes) {
- gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
- free_pdp(vm, pdp);
- }
- unwind:
- gen8_ppgtt_clear_4lvl(vm, from, start - from);
- return -ENOMEM;
- }
- static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
- struct i915_page_directory_pointer *pdp,
- u64 start, u64 length,
- gen8_pte_t scratch_pte,
- struct seq_file *m)
- {
- struct i915_address_space *vm = &ppgtt->base;
- struct i915_page_directory *pd;
- u32 pdpe;
- gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
- struct i915_page_table *pt;
- u64 pd_len = length;
- u64 pd_start = start;
- u32 pde;
- if (pdp->page_directory[pdpe] == ppgtt->base.scratch_pd)
- continue;
- seq_printf(m, "\tPDPE #%d\n", pdpe);
- gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
- u32 pte;
- gen8_pte_t *pt_vaddr;
- if (pd->page_table[pde] == ppgtt->base.scratch_pt)
- continue;
- pt_vaddr = kmap_atomic_px(pt);
- for (pte = 0; pte < GEN8_PTES; pte += 4) {
- /* cast to u64 before shifting to avoid 32-bit overflow */
- u64 va = ((u64)pdpe << GEN8_PDPE_SHIFT |
- (u64)pde << GEN8_PDE_SHIFT |
- (u64)pte << GEN8_PTE_SHIFT);
- int i;
- bool found = false;
- for (i = 0; i < 4; i++)
- if (pt_vaddr[pte + i] != scratch_pte)
- found = true;
- if (!found)
- continue;
- seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
- for (i = 0; i < 4; i++) {
- if (pt_vaddr[pte + i] != scratch_pte)
- seq_printf(m, " %llx", pt_vaddr[pte + i]);
- else
- seq_puts(m, " SCRATCH ");
- }
- seq_puts(m, "\n");
- }
- kunmap_atomic(pt_vaddr);
- }
- }
- }
- static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
- {
- struct i915_address_space *vm = &ppgtt->base;
- const gen8_pte_t scratch_pte =
- gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
- u64 start = 0, length = ppgtt->base.total;
- if (use_4lvl(vm)) {
- u64 pml4e;
- struct i915_pml4 *pml4 = &ppgtt->pml4;
- struct i915_page_directory_pointer *pdp;
- gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
- if (pml4->pdps[pml4e] == ppgtt->base.scratch_pdp)
- continue;
- seq_printf(m, " PML4E #%llu\n", pml4e);
- gen8_dump_pdp(ppgtt, pdp, start, length, scratch_pte, m);
- }
- } else {
- gen8_dump_pdp(ppgtt, &ppgtt->pdp, start, length, scratch_pte, m);
- }
- }
- static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
- {
- struct i915_address_space *vm = &ppgtt->base;
- struct i915_page_directory_pointer *pdp = &ppgtt->pdp;
- struct i915_page_directory *pd;
- u64 start = 0, length = ppgtt->base.total;
- u64 from = start;
- unsigned int pdpe;
- gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
- pd = alloc_pd(vm);
- if (IS_ERR(pd))
- goto unwind;
- gen8_initialize_pd(vm, pd);
- gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
- pdp->used_pdpes++;
- }
- pdp->used_pdpes++; /* never remove */
- return 0;
- unwind:
- start -= from;
- gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
- gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
- free_pd(vm, pd);
- }
- pdp->used_pdpes = 0;
- return -ENOMEM;
- }
- /*
- * GEN8 legacy ppgtt programming is accomplished through a max of 4 PDP
- * registers, with a net effect resembling a 2-level page table in normal x86
- * terms. Each PDPE covers 1GB of address space; 4 PDPEs * 512 PDEs * 512
- * PTEs * 4096 bytes = 4GB of legacy 32b address space.
- */
- static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
- {
- struct i915_address_space *vm = &ppgtt->base;
- struct drm_i915_private *dev_priv = vm->i915;
- int ret;
- ppgtt->base.total = USES_FULL_48BIT_PPGTT(dev_priv) ?
- 1ULL << 48 :
- 1ULL << 32;
- ret = gen8_init_scratch(&ppgtt->base);
- if (ret) {
- ppgtt->base.total = 0;
- return ret;
- }
- /* There are only a few exceptions for gen >= 6: chv and bxt.
- * And we are not sure about the latter, so play safe for now.
- */
- if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
- ppgtt->base.pt_kmap_wc = true;
- if (use_4lvl(vm)) {
- ret = setup_px(&ppgtt->base, &ppgtt->pml4);
- if (ret)
- goto free_scratch;
- gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);
- ppgtt->switch_mm = gen8_mm_switch_4lvl;
- ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_4lvl;
- ppgtt->base.insert_entries = gen8_ppgtt_insert_4lvl;
- ppgtt->base.clear_range = gen8_ppgtt_clear_4lvl;
- } else {
- ret = __pdp_init(&ppgtt->base, &ppgtt->pdp);
- if (ret)
- goto free_scratch;
- if (intel_vgpu_active(dev_priv)) {
- ret = gen8_preallocate_top_level_pdp(ppgtt);
- if (ret) {
- __pdp_fini(&ppgtt->pdp);
- goto free_scratch;
- }
- }
- ppgtt->switch_mm = gen8_mm_switch_3lvl;
- ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_3lvl;
- ppgtt->base.insert_entries = gen8_ppgtt_insert_3lvl;
- ppgtt->base.clear_range = gen8_ppgtt_clear_3lvl;
- }
- if (intel_vgpu_active(dev_priv))
- gen8_ppgtt_notify_vgt(ppgtt, true);
- ppgtt->base.cleanup = gen8_ppgtt_cleanup;
- ppgtt->base.unbind_vma = ppgtt_unbind_vma;
- ppgtt->base.bind_vma = ppgtt_bind_vma;
- ppgtt->debug_dump = gen8_dump_ppgtt;
- return 0;
- free_scratch:
- gen8_free_scratch(&ppgtt->base);
- return ret;
- }
- static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
- {
- struct i915_address_space *vm = &ppgtt->base;
- struct i915_page_table *unused;
- gen6_pte_t scratch_pte;
- u32 pd_entry, pte, pde;
- u32 start = 0, length = ppgtt->base.total;
- scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, 0);
- gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
- u32 expected;
- gen6_pte_t *pt_vaddr;
- const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
- pd_entry = readl(ppgtt->pd_addr + pde);
- expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
- if (pd_entry != expected)
- seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
- pde,
- pd_entry,
- expected);
- seq_printf(m, "\tPDE: %x\n", pd_entry);
- pt_vaddr = kmap_atomic_px(ppgtt->pd.page_table[pde]);
- for (pte = 0; pte < GEN6_PTES; pte += 4) {
- unsigned long va =
- (pde * PAGE_SIZE * GEN6_PTES) +
- (pte * PAGE_SIZE);
- int i;
- bool found = false;
- for (i = 0; i < 4; i++)
- if (pt_vaddr[pte + i] != scratch_pte)
- found = true;
- if (!found)
- continue;
- seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
- for (i = 0; i < 4; i++) {
- if (pt_vaddr[pte + i] != scratch_pte)
- seq_printf(m, " %08x", pt_vaddr[pte + i]);
- else
- seq_puts(m, " SCRATCH ");
- }
- seq_puts(m, "\n");
- }
- kunmap_atomic(pt_vaddr);
- }
- }
- /* Write the PDE entry at index @pde, pointing it at the page table @pt */
- static inline void gen6_write_pde(const struct i915_hw_ppgtt *ppgtt,
- const unsigned int pde,
- const struct i915_page_table *pt)
- {
- /* Caller needs to make sure the write completes if necessary */
- writel_relaxed(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
- ppgtt->pd_addr + pde);
- }
- /* Write the PDEs for all the page tables covering the given range into
- * successive page directory entries. */
- static void gen6_write_page_range(struct i915_hw_ppgtt *ppgtt,
- u32 start, u32 length)
- {
- struct i915_page_table *pt;
- unsigned int pde;
- gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde)
- gen6_write_pde(ppgtt, pde, pt);
- mark_tlbs_dirty(ppgtt);
- wmb();
- }
- static inline u32 get_pd_offset(struct i915_hw_ppgtt *ppgtt)
- {
- GEM_BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
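- /* ggtt_offset was stored as node.start / PAGE_SIZE * sizeof(gen6_pte_t),
- * i.e. node.start >> 10, so shifting back up recovers the GGTT byte
- * offset of the page directory that is written to RING_PP_DIR_BASE.
- */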
- return ppgtt->pd.base.ggtt_offset << 10;
- }
- static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_request *req)
- {
- struct intel_engine_cs *engine = req->engine;
- u32 *cs;
- /* NB: TLBs must be flushed and invalidated before a switch */
- cs = intel_ring_begin(req, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
- *cs++ = MI_LOAD_REGISTER_IMM(2);
- *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
- *cs++ = PP_DIR_DCLV_2G;
- *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
- *cs++ = get_pd_offset(ppgtt);
- *cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
- return 0;
- }
- static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_request *req)
- {
- struct intel_engine_cs *engine = req->engine;
- u32 *cs;
- /* NB: TLBs must be flushed and invalidated before a switch */
- cs = intel_ring_begin(req, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
- *cs++ = MI_LOAD_REGISTER_IMM(2);
- *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
- *cs++ = PP_DIR_DCLV_2G;
- *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
- *cs++ = get_pd_offset(ppgtt);
- *cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
- return 0;
- }
- static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_request *req)
- {
- struct intel_engine_cs *engine = req->engine;
- struct drm_i915_private *dev_priv = req->i915;
- I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
- I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
- return 0;
- }
- static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
- {
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- for_each_engine(engine, dev_priv, id) {
- u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ?
- GEN8_GFX_PPGTT_48B : 0;
- I915_WRITE(RING_MODE_GEN7(engine),
- _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
- }
- }
- static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
- {
- struct intel_engine_cs *engine;
- u32 ecochk, ecobits;
- enum intel_engine_id id;
- ecobits = I915_READ(GAC_ECO_BITS);
- I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
- ecochk = I915_READ(GAM_ECOCHK);
- if (IS_HASWELL(dev_priv)) {
- ecochk |= ECOCHK_PPGTT_WB_HSW;
- } else {
- ecochk |= ECOCHK_PPGTT_LLC_IVB;
- ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
- }
- I915_WRITE(GAM_ECOCHK, ecochk);
- for_each_engine(engine, dev_priv, id) {
- /* GFX_MODE is per-ring on gen7+ */
- I915_WRITE(RING_MODE_GEN7(engine),
- _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
- }
- }
- static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
- {
- u32 ecochk, gab_ctl, ecobits;
- ecobits = I915_READ(GAC_ECO_BITS);
- I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
- ECOBITS_PPGTT_CACHE64B);
- gab_ctl = I915_READ(GAB_CTL);
- I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
- ecochk = I915_READ(GAM_ECOCHK);
- I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
- I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
- }
- /* PPGTT support for Sandybridge/Gen6 and later */
- static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
- {
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- unsigned int first_entry = start >> PAGE_SHIFT;
- unsigned int pde = first_entry / GEN6_PTES;
- unsigned int pte = first_entry % GEN6_PTES;
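- /* e.g. with GEN6_PTES == 1024, GTT page 1536 lands in pde 1, pte 512 */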
- unsigned int num_entries = length >> PAGE_SHIFT;
- gen6_pte_t scratch_pte =
- vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
- while (num_entries) {
- struct i915_page_table *pt = ppgtt->pd.page_table[pde++];
- unsigned int end = min(pte + num_entries, GEN6_PTES);
- gen6_pte_t *vaddr;
- num_entries -= end - pte;
- /* Note that the hw doesn't support removing PDEs on the fly
- * (they are cached inside the context with no means to
- * invalidate the cache), so we can only reset the PTE
- * entries back to scratch.
- */
- vaddr = kmap_atomic_px(pt);
- do {
- vaddr[pte++] = scratch_pte;
- } while (pte < end);
- kunmap_atomic(vaddr);
- pte = 0;
- }
- }
- static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
- {
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- unsigned first_entry = vma->node.start >> PAGE_SHIFT;
- unsigned act_pt = first_entry / GEN6_PTES;
- unsigned act_pte = first_entry % GEN6_PTES;
- const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
- struct sgt_dma iter = sgt_dma(vma);
- gen6_pte_t *vaddr;
- vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
- do {
- vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
- iter.dma += PAGE_SIZE;
- if (iter.dma == iter.max) {
- iter.sg = __sg_next(iter.sg);
- if (!iter.sg)
- break;
- iter.dma = sg_dma_address(iter.sg);
- iter.max = iter.dma + iter.sg->length;
- }
- if (++act_pte == GEN6_PTES) {
- kunmap_atomic(vaddr);
- vaddr = kmap_atomic_px(ppgtt->pd.page_table[++act_pt]);
- act_pte = 0;
- }
- } while (1);
- kunmap_atomic(vaddr);
- }
- static int gen6_alloc_va_range(struct i915_address_space *vm,
- u64 start, u64 length)
- {
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct i915_page_table *pt;
- u64 from = start;
- unsigned int pde;
- bool flush = false;
- gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
- if (pt == vm->scratch_pt) {
- pt = alloc_pt(vm);
- if (IS_ERR(pt))
- goto unwind_out;
- gen6_initialize_pt(vm, pt);
- ppgtt->pd.page_table[pde] = pt;
- gen6_write_pde(ppgtt, pde, pt);
- flush = true;
- }
- }
- if (flush) {
- mark_tlbs_dirty(ppgtt);
- wmb();
- }
- return 0;
- unwind_out:
- gen6_ppgtt_clear_range(vm, from, start);
- return -ENOMEM;
- }
- static int gen6_init_scratch(struct i915_address_space *vm)
- {
- int ret;
- ret = setup_scratch_page(vm, I915_GFP_DMA);
- if (ret)
- return ret;
- vm->scratch_pt = alloc_pt(vm);
- if (IS_ERR(vm->scratch_pt)) {
- cleanup_scratch_page(vm);
- return PTR_ERR(vm->scratch_pt);
- }
- gen6_initialize_pt(vm, vm->scratch_pt);
- return 0;
- }
- static void gen6_free_scratch(struct i915_address_space *vm)
- {
- free_pt(vm, vm->scratch_pt);
- cleanup_scratch_page(vm);
- }
- static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
- {
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct i915_page_directory *pd = &ppgtt->pd;
- struct i915_page_table *pt;
- u32 pde;
- drm_mm_remove_node(&ppgtt->node);
- gen6_for_all_pdes(pt, pd, pde)
- if (pt != vm->scratch_pt)
- free_pt(vm, pt);
- gen6_free_scratch(vm);
- }
- static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
- {
- struct i915_address_space *vm = &ppgtt->base;
- struct drm_i915_private *dev_priv = ppgtt->base.i915;
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- int ret;
- /* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
- * allocator works in address space sizes, so it's multiplied by page
- * size. We allocate at the top of the GTT to avoid fragmentation.
- */
- BUG_ON(!drm_mm_initialized(&ggtt->base.mm));
- ret = gen6_init_scratch(vm);
- if (ret)
- return ret;
- ret = i915_gem_gtt_insert(&ggtt->base, &ppgtt->node,
- GEN6_PD_SIZE, GEN6_PD_ALIGN,
- I915_COLOR_UNEVICTABLE,
- 0, ggtt->base.total,
- PIN_HIGH);
- if (ret)
- goto err_out;
- if (ppgtt->node.start < ggtt->mappable_end)
- DRM_DEBUG("Forced to use aperture for PDEs\n");
- ppgtt->pd.base.ggtt_offset =
- ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
- ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
- ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
- return 0;
- err_out:
- gen6_free_scratch(vm);
- return ret;
- }
- static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
- {
- return gen6_ppgtt_allocate_page_directories(ppgtt);
- }
- static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
- u64 start, u64 length)
- {
- struct i915_page_table *unused;
- u32 pde;
- gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
- ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
- }
- static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
- {
- struct drm_i915_private *dev_priv = ppgtt->base.i915;
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- int ret;
- ppgtt->base.pte_encode = ggtt->base.pte_encode;
- if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv))
- ppgtt->switch_mm = gen6_mm_switch;
- else if (IS_HASWELL(dev_priv))
- ppgtt->switch_mm = hsw_mm_switch;
- else if (IS_GEN7(dev_priv))
- ppgtt->switch_mm = gen7_mm_switch;
- else
- BUG();
- ret = gen6_ppgtt_alloc(ppgtt);
- if (ret)
- return ret;
- ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
- gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
- gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
- ret = gen6_alloc_va_range(&ppgtt->base, 0, ppgtt->base.total);
- if (ret) {
- gen6_ppgtt_cleanup(&ppgtt->base);
- return ret;
- }
- ppgtt->base.clear_range = gen6_ppgtt_clear_range;
- ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
- ppgtt->base.unbind_vma = ppgtt_unbind_vma;
- ppgtt->base.bind_vma = ppgtt_bind_vma;
- ppgtt->base.cleanup = gen6_ppgtt_cleanup;
- ppgtt->debug_dump = gen6_dump_ppgtt;
- DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
- ppgtt->node.size >> 20,
- ppgtt->node.start / PAGE_SIZE);
- DRM_DEBUG_DRIVER("Adding PPGTT at offset %x\n",
- ppgtt->pd.base.ggtt_offset << 10);
- return 0;
- }
- static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_private *dev_priv)
- {
- ppgtt->base.i915 = dev_priv;
- ppgtt->base.dma = &dev_priv->drm.pdev->dev;
- if (INTEL_INFO(dev_priv)->gen < 8)
- return gen6_ppgtt_init(ppgtt);
- else
- return gen8_ppgtt_init(ppgtt);
- }
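- /* Common setup shared by the GGTT and all ppgtts: the timeline, the
- * drm_mm range manager and the vma bookkeeping lists.
- */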
- static void i915_address_space_init(struct i915_address_space *vm,
- struct drm_i915_private *dev_priv,
- const char *name)
- {
- i915_gem_timeline_init(dev_priv, &vm->timeline, name);
- drm_mm_init(&vm->mm, 0, vm->total);
- vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
- INIT_LIST_HEAD(&vm->active_list);
- INIT_LIST_HEAD(&vm->inactive_list);
- INIT_LIST_HEAD(&vm->unbound_list);
- list_add_tail(&vm->global_link, &dev_priv->vm_list);
- pagevec_init(&vm->free_pages);
- }
- static void i915_address_space_fini(struct i915_address_space *vm)
- {
- if (pagevec_count(&vm->free_pages))
- vm_free_pages_release(vm);
- i915_gem_timeline_fini(&vm->timeline);
- drm_mm_takedown(&vm->mm);
- list_del(&vm->global_link);
- }
- static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
- {
- /* This function is for GTT-related workarounds. It is called on driver
- * load and after a GPU reset, so workarounds placed here are reapplied
- * even if a GPU reset overwrites them.
- */
- /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl */
- if (IS_BROADWELL(dev_priv))
- I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
- else if (IS_CHERRYVIEW(dev_priv))
- I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
- else if (IS_GEN9_BC(dev_priv))
- I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
- else if (IS_GEN9_LP(dev_priv))
- I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
- }
- int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
- {
- gtt_write_workarounds(dev_priv);
- /* In the case of execlists, PPGTT is enabled by the context descriptor
- * and the PDPs are contained within the context itself. We don't
- * need to do anything here. */
- if (i915.enable_execlists)
- return 0;
- if (!USES_PPGTT(dev_priv))
- return 0;
- if (IS_GEN6(dev_priv))
- gen6_ppgtt_enable(dev_priv);
- else if (IS_GEN7(dev_priv))
- gen7_ppgtt_enable(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 8)
- gen8_ppgtt_enable(dev_priv);
- else
- MISSING_CASE(INTEL_GEN(dev_priv));
- return 0;
- }
- struct i915_hw_ppgtt *
- i915_ppgtt_create(struct drm_i915_private *dev_priv,
- struct drm_i915_file_private *fpriv,
- const char *name)
- {
- struct i915_hw_ppgtt *ppgtt;
- int ret;
- ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
- if (!ppgtt)
- return ERR_PTR(-ENOMEM);
- ret = __hw_ppgtt_init(ppgtt, dev_priv);
- if (ret) {
- kfree(ppgtt);
- return ERR_PTR(ret);
- }
- kref_init(&ppgtt->ref);
- i915_address_space_init(&ppgtt->base, dev_priv, name);
- ppgtt->base.file = fpriv;
- trace_i915_ppgtt_create(&ppgtt->base);
- return ppgtt;
- }
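- /* Mark the address space closed and close every vma still hanging off
- * its active/inactive/unbound lists so they are reaped with the ppgtt.
- */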
- void i915_ppgtt_close(struct i915_address_space *vm)
- {
- struct list_head *phases[] = {
- &vm->active_list,
- &vm->inactive_list,
- &vm->unbound_list,
- NULL,
- }, **phase;
- GEM_BUG_ON(vm->closed);
- vm->closed = true;
- for (phase = phases; *phase; phase++) {
- struct i915_vma *vma, *vn;
- list_for_each_entry_safe(vma, vn, *phase, vm_link)
- if (!i915_vma_is_closed(vma))
- i915_vma_close(vma);
- }
- }
- void i915_ppgtt_release(struct kref *kref)
- {
- struct i915_hw_ppgtt *ppgtt =
- container_of(kref, struct i915_hw_ppgtt, ref);
- trace_i915_ppgtt_release(&ppgtt->base);
- /* vmas should already be unbound and destroyed */
- WARN_ON(!list_empty(&ppgtt->base.active_list));
- WARN_ON(!list_empty(&ppgtt->base.inactive_list));
- WARN_ON(!list_empty(&ppgtt->base.unbound_list));
- ppgtt->base.cleanup(&ppgtt->base);
- i915_address_space_fini(&ppgtt->base);
- kfree(ppgtt);
- }
- /* Certain Gen5 chipsets require idling the GPU before
- * unmapping anything from the GTT when VT-d is enabled.
- */
- static bool needs_idle_maps(struct drm_i915_private *dev_priv)
- {
- /* Query intel_iommu to see if we need the workaround. Presumably that
- * was loaded first.
- */
- return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active();
- }
- void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
- {
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- if (INTEL_INFO(dev_priv)->gen < 6)
- return;
- for_each_engine(engine, dev_priv, id) {
- u32 fault_reg;
- fault_reg = I915_READ(RING_FAULT_REG(engine));
- if (fault_reg & RING_FAULT_VALID) {
- DRM_DEBUG_DRIVER("Unexpected fault\n"
- "\tAddr: 0x%08lx\n"
- "\tAddress space: %s\n"
- "\tSource ID: %d\n"
- "\tType: %d\n",
- fault_reg & PAGE_MASK,
- fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
- RING_FAULT_SRCID(fault_reg),
- RING_FAULT_FAULT_TYPE(fault_reg));
- I915_WRITE(RING_FAULT_REG(engine),
- fault_reg & ~RING_FAULT_VALID);
- }
- }
- /* Engine-specific init may not have been done until this point. */
- if (dev_priv->engine[RCS])
- POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
- }
- void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
- {
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- /* Don't bother messing with faults pre GEN6 as we have little
- * documentation supporting that it's a good idea.
- */
- if (INTEL_GEN(dev_priv) < 6)
- return;
- i915_check_and_clear_faults(dev_priv);
- ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
- i915_ggtt_invalidate(dev_priv);
- }
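- /* DMA-map an object's backing pages. If the remapping table (e.g.
- * swiotlb) is full, purge other objects via the shrinker and retry
- * until the mapping succeeds or nothing is left to reclaim.
- */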
- int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
- struct sg_table *pages)
- {
- do {
- if (dma_map_sg(&obj->base.dev->pdev->dev,
- pages->sgl, pages->nents,
- PCI_DMA_BIDIRECTIONAL))
- return 0;
- /* If the DMA remap fails, one cause can be that we have
- * too many objects pinned in a small remapping table,
- * such as swiotlb. Incrementally purge all other objects and
- * try again - if there are no more pages to remove from
- * the DMA remapper, i915_gem_shrink will return 0.
- */
- GEM_BUG_ON(obj->mm.pages == pages);
- } while (i915_gem_shrink(to_i915(obj->base.dev),
- obj->base.size >> PAGE_SHIFT, NULL,
- I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND |
- I915_SHRINK_ACTIVE));
- return -ENOSPC;
- }
- static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
- {
- writeq(pte, addr);
- }
- static void gen8_ggtt_insert_page(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level level,
- u32 unused)
- {
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- gen8_pte_t __iomem *pte =
- (gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
- gen8_set_pte(pte, gen8_pte_encode(addr, level));
- ggtt->invalidate(vm->i915);
- }
- static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level level,
- u32 unused)
- {
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- struct sgt_iter sgt_iter;
- gen8_pte_t __iomem *gtt_entries;
- const gen8_pte_t pte_encode = gen8_pte_encode(0, level);
- dma_addr_t addr;
- gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
- gtt_entries += vma->node.start >> PAGE_SHIFT;
- for_each_sgt_dma(addr, sgt_iter, vma->pages)
- gen8_set_pte(gtt_entries++, pte_encode | addr);
- wmb();
- /* This next bit makes the above posting read even more important. We
- * want to flush the TLBs only after we're certain all the PTE updates
- * have finished.
- */
- ggtt->invalidate(vm->i915);
- }
- static void gen6_ggtt_insert_page(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level level,
- u32 flags)
- {
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- gen6_pte_t __iomem *pte =
- (gen6_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
- iowrite32(vm->pte_encode(addr, level, flags), pte);
- ggtt->invalidate(vm->i915);
- }
- /*
- * Binds an object into the global gtt with the specified cache level. The object
- * will be accessible to the GPU via commands whose operands reference offsets
- * within the global GTT as well as accessible by the GPU through the GMADR
- * mapped BAR (dev_priv->mm.gtt->gtt).
- */
- static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level level,
- u32 flags)
- {
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
- unsigned int i = vma->node.start >> PAGE_SHIFT;
- struct sgt_iter iter;
- dma_addr_t addr;
- for_each_sgt_dma(addr, iter, vma->pages)
- iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
- wmb();
- /* This next bit makes the above posting read even more important. We
- * want to flush the TLBs only after we're certain all the PTE updates
- * have finished.
- */
- ggtt->invalidate(vm->i915);
- }
- static void nop_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
- {
- }
- static void gen8_ggtt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
- {
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- unsigned first_entry = start >> PAGE_SHIFT;
- unsigned num_entries = length >> PAGE_SHIFT;
- const gen8_pte_t scratch_pte =
- gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
- gen8_pte_t __iomem *gtt_base =
- (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
- const int max_entries = ggtt_total_entries(ggtt) - first_entry;
- int i;
- if (WARN(num_entries > max_entries,
- "First entry = %d; Num entries = %d (max=%d)\n",
- first_entry, num_entries, max_entries))
- num_entries = max_entries;
- for (i = 0; i < num_entries; i++)
- gen8_set_pte(&gtt_base[i], scratch_pte);
- }
- static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
- {
- struct drm_i915_private *dev_priv = vm->i915;
- /*
- * Make sure the internal GAM fifo has been cleared of all GTT
- * writes before exiting stop_machine(). This guarantees that
- * any aperture accesses waiting to start in another process
- * cannot back up behind the GTT writes causing a hang.
- * The register can be any arbitrary GAM register.
- */
- POSTING_READ(GFX_FLSH_CNTL_GEN6);
- }
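- /* The __BKL variants below marshal their arguments through a struct so
- * that the plain GGTT update can be replayed under stop_machine(),
- * serializing PTE writes against concurrent aperture access.
- */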
- struct insert_page {
- struct i915_address_space *vm;
- dma_addr_t addr;
- u64 offset;
- enum i915_cache_level level;
- };
- static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
- {
- struct insert_page *arg = _arg;
- gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
- bxt_vtd_ggtt_wa(arg->vm);
- return 0;
- }
- static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level level,
- u32 unused)
- {
- struct insert_page arg = { vm, addr, offset, level };
- stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
- }
- struct insert_entries {
- struct i915_address_space *vm;
- struct i915_vma *vma;
- enum i915_cache_level level;
- };
- static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
- {
- struct insert_entries *arg = _arg;
- gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, 0);
- bxt_vtd_ggtt_wa(arg->vm);
- return 0;
- }
- static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level level,
- u32 unused)
- {
- struct insert_entries arg = { vm, vma, level };
- stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
- }
- struct clear_range {
- struct i915_address_space *vm;
- u64 start;
- u64 length;
- };
- static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
- {
- struct clear_range *arg = _arg;
- gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
- bxt_vtd_ggtt_wa(arg->vm);
- return 0;
- }
- static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
- u64 start,
- u64 length)
- {
- struct clear_range arg = { vm, start, length };
- stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
- }
- static void gen6_ggtt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
- {
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- unsigned first_entry = start >> PAGE_SHIFT;
- unsigned num_entries = length >> PAGE_SHIFT;
- gen6_pte_t scratch_pte, __iomem *gtt_base =
- (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
- const int max_entries = ggtt_total_entries(ggtt) - first_entry;
- int i;
- if (WARN(num_entries > max_entries,
- "First entry = %d; Num entries = %d (max=%d)\n",
- first_entry, num_entries, max_entries))
- num_entries = max_entries;
- scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, 0);
- for (i = 0; i < num_entries; i++)
- iowrite32(scratch_pte, &gtt_base[i]);
- }
- static void i915_ggtt_insert_page(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level cache_level,
- u32 unused)
- {
- unsigned int flags = (cache_level == I915_CACHE_NONE) ?
- AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
- intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
- }
- static void i915_ggtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 unused)
- {
- unsigned int flags = (cache_level == I915_CACHE_NONE) ?
- AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
- intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
- flags);
- }
- static void i915_ggtt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
- {
- intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
- }
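- /* Bind a vma into the GGTT, holding a runtime-pm wakeref around the
- * PTE writes; read-only objects (gt_ro) get PTE_READ_ONLY set.
- */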
- static int ggtt_bind_vma(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
- {
- struct drm_i915_private *i915 = vma->vm->i915;
- struct drm_i915_gem_object *obj = vma->obj;
- u32 pte_flags;
- if (unlikely(!vma->pages)) {
- int ret = i915_get_ggtt_vma_pages(vma);
- if (ret)
- return ret;
- }
- /* Currently applicable only to VLV */
- pte_flags = 0;
- if (obj->gt_ro)
- pte_flags |= PTE_READ_ONLY;
- intel_runtime_pm_get(i915);
- vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
- intel_runtime_pm_put(i915);
- /*
- * Without aliasing PPGTT there's no difference between
- * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
- * upgrade to both bound if we bind either to avoid double-binding.
- */
- vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
- return 0;
- }
- static void ggtt_unbind_vma(struct i915_vma *vma)
- {
- struct drm_i915_private *i915 = vma->vm->i915;
- intel_runtime_pm_get(i915);
- vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
- intel_runtime_pm_put(i915);
- }
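- /* With an aliasing ppgtt, a vma may be bound into the GGTT, the ppgtt,
- * or both; ppgtt va space is allocated on the first LOCAL_BIND.
- */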
- static int aliasing_gtt_bind_vma(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
- {
- struct drm_i915_private *i915 = vma->vm->i915;
- u32 pte_flags;
- int ret;
- if (unlikely(!vma->pages)) {
- ret = i915_get_ggtt_vma_pages(vma);
- if (ret)
- return ret;
- }
- /* Currently applicable only to VLV */
- pte_flags = 0;
- if (vma->obj->gt_ro)
- pte_flags |= PTE_READ_ONLY;
- if (flags & I915_VMA_LOCAL_BIND) {
- struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
- if (!(vma->flags & I915_VMA_LOCAL_BIND) &&
- appgtt->base.allocate_va_range) {
- ret = appgtt->base.allocate_va_range(&appgtt->base,
- vma->node.start,
- vma->size);
- if (ret)
- goto err_pages;
- }
- appgtt->base.insert_entries(&appgtt->base, vma, cache_level,
- pte_flags);
- }
- if (flags & I915_VMA_GLOBAL_BIND) {
- intel_runtime_pm_get(i915);
- vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
- intel_runtime_pm_put(i915);
- }
- return 0;
- err_pages:
- if (!(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND))) {
- if (vma->pages != vma->obj->mm.pages) {
- GEM_BUG_ON(!vma->pages);
- sg_free_table(vma->pages);
- kfree(vma->pages);
- }
- vma->pages = NULL;
- }
- return ret;
- }
- static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
- {
- struct drm_i915_private *i915 = vma->vm->i915;
- if (vma->flags & I915_VMA_GLOBAL_BIND) {
- intel_runtime_pm_get(i915);
- vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
- intel_runtime_pm_put(i915);
- }
- if (vma->flags & I915_VMA_LOCAL_BIND) {
- struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->base;
- vm->clear_range(vm, vma->node.start, vma->size);
- }
- }
- void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
- struct sg_table *pages)
- {
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct device *kdev = &dev_priv->drm.pdev->dev;
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- if (unlikely(ggtt->do_idle_maps)) {
- if (i915_gem_wait_for_idle(dev_priv, 0)) {
- DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
- /* Wait a bit, in hopes it avoids the hang */
- udelay(10);
- }
- }
- dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
- }
- static void i915_gtt_color_adjust(const struct drm_mm_node *node,
- unsigned long color,
- u64 *start,
- u64 *end)
- {
- if (node->allocated && node->color != color)
- *start += I915_GTT_PAGE_SIZE;
- /* Also leave a space between the unallocated reserved node after the
- * GTT and any objects within the GTT, i.e. we use the color adjustment
- * to insert a guard page to prevent prefetches crossing over the
- * GTT boundary.
- */
- node = list_next_entry(node, node_list);
- if (node->color != color)
- *end -= I915_GTT_PAGE_SIZE;
- }
- int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
- {
- struct i915_ggtt *ggtt = &i915->ggtt;
- struct i915_hw_ppgtt *ppgtt;
- int err;
- ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM), "[alias]");
- if (IS_ERR(ppgtt))
- return PTR_ERR(ppgtt);
- if (WARN_ON(ppgtt->base.total < ggtt->base.total)) {
- err = -ENODEV;
- goto err_ppgtt;
- }
- if (ppgtt->base.allocate_va_range) {
- /* Note we only pre-allocate as far as the end of the global
- * GTT. On 48b / 4-level page-tables, the difference is very,
- * very significant! We have to preallocate as GVT/vgpu does
- * not like the page directory disappearing.
- */
- err = ppgtt->base.allocate_va_range(&ppgtt->base,
- 0, ggtt->base.total);
- if (err)
- goto err_ppgtt;
- }
- i915->mm.aliasing_ppgtt = ppgtt;
- WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
- ggtt->base.bind_vma = aliasing_gtt_bind_vma;
- WARN_ON(ggtt->base.unbind_vma != ggtt_unbind_vma);
- ggtt->base.unbind_vma = aliasing_gtt_unbind_vma;
- return 0;
- err_ppgtt:
- i915_ppgtt_put(ppgtt);
- return err;
- }
- void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
- {
- struct i915_ggtt *ggtt = &i915->ggtt;
- struct i915_hw_ppgtt *ppgtt;
- ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt);
- if (!ppgtt)
- return;
- i915_ppgtt_put(ppgtt);
- ggtt->base.bind_vma = ggtt_bind_vma;
- ggtt->base.unbind_vma = ggtt_unbind_vma;
- }
- int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
- {
- /* Let GEM Manage all of the aperture.
- *
- * However, leave one page at the end still bound to the scratch page.
- * There are a number of places where the hardware apparently prefetches
- * past the end of the object, and we've seen multiple hangs with the
- * GPU head pointer stuck in a batchbuffer bound at the last page of the
- * aperture. One page should be enough to keep any prefetching inside
- * of the aperture.
- */
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- unsigned long hole_start, hole_end;
- struct drm_mm_node *entry;
- int ret;
- ret = intel_vgt_balloon(dev_priv);
- if (ret)
- return ret;
- /* Reserve a mappable slot for our lockless error capture */
- ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
- PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
- 0, ggtt->mappable_end,
- DRM_MM_INSERT_LOW);
- if (ret)
- return ret;
- /* Clear any non-preallocated blocks */
- drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
- DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
- hole_start, hole_end);
- ggtt->base.clear_range(&ggtt->base, hole_start,
- hole_end - hole_start);
- }
- /* And finally clear the reserved guard page */
- ggtt->base.clear_range(&ggtt->base,
- ggtt->base.total - PAGE_SIZE, PAGE_SIZE);
- if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
- ret = i915_gem_init_aliasing_ppgtt(dev_priv);
- if (ret)
- goto err;
- }
- return 0;
- err:
- drm_mm_remove_node(&ggtt->error_capture);
- return ret;
- }
- /**
- * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
- * @dev_priv: i915 device
- */
- void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
- {
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- struct i915_vma *vma, *vn;
- ggtt->base.closed = true;
- mutex_lock(&dev_priv->drm.struct_mutex);
- WARN_ON(!list_empty(&ggtt->base.active_list));
- list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link)
- WARN_ON(i915_vma_unbind(vma));
- mutex_unlock(&dev_priv->drm.struct_mutex);
- i915_gem_cleanup_stolen(&dev_priv->drm);
- mutex_lock(&dev_priv->drm.struct_mutex);
- i915_gem_fini_aliasing_ppgtt(dev_priv);
- if (drm_mm_node_allocated(&ggtt->error_capture))
- drm_mm_remove_node(&ggtt->error_capture);
- if (drm_mm_initialized(&ggtt->base.mm)) {
- intel_vgt_deballoon(dev_priv);
- i915_address_space_fini(&ggtt->base);
- }
- ggtt->base.cleanup(&ggtt->base);
- mutex_unlock(&dev_priv->drm.struct_mutex);
- arch_phys_wc_del(ggtt->mtrr);
- io_mapping_fini(&ggtt->mappable);
- }
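- /* The helpers below decode the GGTT and stolen-memory sizes from the
- * GMCH control word in PCI config space; the encodings vary per gen.
- */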
- static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
- {
- snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
- snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
- return snb_gmch_ctl << 20;
- }
- static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
- {
- bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
- bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
- if (bdw_gmch_ctl)
- bdw_gmch_ctl = 1 << bdw_gmch_ctl;
- #ifdef CONFIG_X86_32
- /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
- if (bdw_gmch_ctl > 4)
- bdw_gmch_ctl = 4;
- #endif
- return bdw_gmch_ctl << 20;
- }
- static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
- {
- gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
- gmch_ctrl &= SNB_GMCH_GGMS_MASK;
- if (gmch_ctrl)
- return 1 << (20 + gmch_ctrl);
- return 0;
- }
- static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
- {
- snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
- snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
- return (size_t)snb_gmch_ctl << 25; /* 32 MB units */
- }
- static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
- {
- bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
- bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
- return (size_t)bdw_gmch_ctl << 25; /* 32 MB units */
- }
- static size_t chv_get_stolen_size(u16 gmch_ctrl)
- {
- gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
- gmch_ctrl &= SNB_GMCH_GMS_MASK;
- /*
- * 0x0 to 0x10: 32MB increments starting at 0MB
- * 0x11 to 0x16: 4MB increments starting at 8MB
- * 0x17 to 0x1d: 4MB increments starting at 36MB
- */
- if (gmch_ctrl < 0x11)
- return (size_t)gmch_ctrl << 25;
- else if (gmch_ctrl < 0x17)
- return (size_t)(gmch_ctrl - 0x11 + 2) << 22;
- else
- return (size_t)(gmch_ctrl - 0x17 + 9) << 22;
- }
- static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
- {
- gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
- gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
- if (gen9_gmch_ctl < 0xf0)
- return (size_t)gen9_gmch_ctl << 25; /* 32 MB units */
- else
- /* 4MB increments starting at 0xf0 for 4MB */
- return (size_t)(gen9_gmch_ctl - 0xf0 + 1) << 22;
- }
- static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
- {
- struct drm_i915_private *dev_priv = ggtt->base.i915;
- struct pci_dev *pdev = dev_priv->drm.pdev;
- phys_addr_t phys_addr;
- int ret;
- /* For modern GENs the PTEs and register space are split in the BAR */
- phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
- /*
- * On BXT writes larger than 64 bit to the GTT pagetable range will be
- * dropped. For WC mappings in general we have 64 byte burst writes
- * when the WC buffer is flushed, so we can't use it, but have to
- * resort to an uncached mapping. The WC issue is easily caught by the
- * readback check when writing GTT PTE entries.
- */
- if (IS_GEN9_LP(dev_priv))
- ggtt->gsm = ioremap_nocache(phys_addr, size);
- else
- ggtt->gsm = ioremap_wc(phys_addr, size);
- if (!ggtt->gsm) {
- DRM_ERROR("Failed to map the ggtt page table\n");
- return -ENOMEM;
- }
- ret = setup_scratch_page(&ggtt->base, GFP_DMA32);
- if (ret) {
- DRM_ERROR("Scratch setup failed\n");
- /* iounmap will also get called at remove, but meh */
- iounmap(ggtt->gsm);
- return ret;
- }
- return 0;
- }
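- /* Program the gen10 private PAT indices: WB/WC/WT/UC plus four aged WB
- * entries, mirroring the bdw layout programmed below.
- */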
- static void cnl_setup_private_ppat(struct drm_i915_private *dev_priv)
- {
- /* XXX: spec is unclear if this is still needed for CNL+ */
- if (!USES_PPGTT(dev_priv)) {
- I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_UC);
- return;
- }
- I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_WB | GEN8_PPAT_LLC);
- I915_WRITE(GEN10_PAT_INDEX(1), GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
- I915_WRITE(GEN10_PAT_INDEX(2), GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
- I915_WRITE(GEN10_PAT_INDEX(3), GEN8_PPAT_UC);
- I915_WRITE(GEN10_PAT_INDEX(4), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
- I915_WRITE(GEN10_PAT_INDEX(5), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
- I915_WRITE(GEN10_PAT_INDEX(6), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
- I915_WRITE(GEN10_PAT_INDEX(7), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
- }
- /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
- * bits. When using advanced contexts each context stores its own PAT, but
- * writing this data shouldn't be harmful even in those cases. */
- static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
- {
- u64 pat;
- pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
- GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
- GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
- GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
- GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
- GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
- GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
- GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
- if (!USES_PPGTT(dev_priv))
- /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
- * so RTL will always use the value corresponding to
- * pat_sel = 000".
- * So let's disable cache for GGTT to avoid screen corruptions.
- * MOCS still can be used though.
- * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
- * before this patch, i.e. the same uncached + snooping access
- * like on gen6/7 seems to be in effect.
- * - So this just fixes blitter/render access. Again it looks
- * like it's not just uncached access, but uncached + snooping.
- * So we can still hold onto all our assumptions wrt cpu
- * clflushing on LLC machines.
- */
- pat = GEN8_PPAT(0, GEN8_PPAT_UC);
- /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
- * write would work. */
- I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
- I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
- }
- static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
- {
- u64 pat;
- /*
- * Map WB on BDW to snooped on CHV.
- *
- * Only the snoop bit has meaning for CHV, the rest is
- * ignored.
- *
- * The hardware will never snoop for certain types of accesses:
- * - CPU GTT (GMADR->GGTT->no snoop->memory)
- * - PPGTT page tables
- * - some other special cycles
- *
- * As with BDW, we also need to consider the following for GT accesses:
- * "For GGTT, there is NO pat_sel[2:0] from the entry,
- * so RTL will always use the value corresponding to
- * pat_sel = 000".
- * Which means we must set the snoop bit in PAT entry 0
- * in order to keep the global status page working.
- */
- pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
- GEN8_PPAT(1, 0) |
- GEN8_PPAT(2, 0) |
- GEN8_PPAT(3, 0) |
- GEN8_PPAT(4, CHV_PPAT_SNOOP) |
- GEN8_PPAT(5, CHV_PPAT_SNOOP) |
- GEN8_PPAT(6, CHV_PPAT_SNOOP) |
- GEN8_PPAT(7, CHV_PPAT_SNOOP);
- I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
- I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
- }
- static void gen6_gmch_remove(struct i915_address_space *vm)
- {
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- iounmap(ggtt->gsm);
- cleanup_scratch_page(vm);
- }
- static int gen8_gmch_probe(struct i915_ggtt *ggtt)
- {
- struct drm_i915_private *dev_priv = ggtt->base.i915;
- struct pci_dev *pdev = dev_priv->drm.pdev;
- unsigned int size;
- u16 snb_gmch_ctl;
- int err;
- /* TODO: We're not aware of mappable constraints on gen8 yet */
- ggtt->mappable_base = pci_resource_start(pdev, 2);
- ggtt->mappable_end = pci_resource_len(pdev, 2);
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
- if (err)
- DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
- pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- if (INTEL_GEN(dev_priv) >= 9) {
- ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
- size = gen8_get_total_gtt_size(snb_gmch_ctl);
- } else if (IS_CHERRYVIEW(dev_priv)) {
- ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
- size = chv_get_total_gtt_size(snb_gmch_ctl);
- } else {
- ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
- size = gen8_get_total_gtt_size(snb_gmch_ctl);
- }
- ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
- if (INTEL_GEN(dev_priv) >= 10)
- cnl_setup_private_ppat(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
- chv_setup_private_ppat(dev_priv);
- else
- bdw_setup_private_ppat(dev_priv);
- ggtt->base.cleanup = gen6_gmch_remove;
- ggtt->base.bind_vma = ggtt_bind_vma;
- ggtt->base.unbind_vma = ggtt_unbind_vma;
- ggtt->base.insert_page = gen8_ggtt_insert_page;
- ggtt->base.clear_range = nop_clear_range;
- if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
- ggtt->base.clear_range = gen8_ggtt_clear_range;
- ggtt->base.insert_entries = gen8_ggtt_insert_entries;
- /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
- if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
- ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
- ggtt->base.insert_page = bxt_vtd_ggtt_insert_page__BKL;
- if (ggtt->base.clear_range != nop_clear_range)
- ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
- }
- ggtt->invalidate = gen6_ggtt_invalidate;
- return ggtt_probe_common(ggtt, size);
- }
- static int gen6_gmch_probe(struct i915_ggtt *ggtt)
- {
- struct drm_i915_private *dev_priv = ggtt->base.i915;
- struct pci_dev *pdev = dev_priv->drm.pdev;
- unsigned int size;
- u16 snb_gmch_ctl;
- int err;
- ggtt->mappable_base = pci_resource_start(pdev, 2);
- ggtt->mappable_end = pci_resource_len(pdev, 2);
- /* 64/512MB is the current min/max we actually know of, but this is just
- * a coarse sanity check.
- */
- if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
- DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end);
- return -ENXIO;
- }
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
- if (err)
- DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
- pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
- size = gen6_get_total_gtt_size(snb_gmch_ctl);
- ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
- ggtt->base.clear_range = gen6_ggtt_clear_range;
- ggtt->base.insert_page = gen6_ggtt_insert_page;
- ggtt->base.insert_entries = gen6_ggtt_insert_entries;
- ggtt->base.bind_vma = ggtt_bind_vma;
- ggtt->base.unbind_vma = ggtt_unbind_vma;
- ggtt->base.cleanup = gen6_gmch_remove;
- ggtt->invalidate = gen6_ggtt_invalidate;
- if (HAS_EDRAM(dev_priv))
- ggtt->base.pte_encode = iris_pte_encode;
- else if (IS_HASWELL(dev_priv))
- ggtt->base.pte_encode = hsw_pte_encode;
- else if (IS_VALLEYVIEW(dev_priv))
- ggtt->base.pte_encode = byt_pte_encode;
- else if (INTEL_GEN(dev_priv) >= 7)
- ggtt->base.pte_encode = ivb_pte_encode;
- else
- ggtt->base.pte_encode = snb_pte_encode;
- return ggtt_probe_common(ggtt, size);
- }
- static void i915_gmch_remove(struct i915_address_space *vm)
- {
- intel_gmch_remove();
- }
- static int i915_gmch_probe(struct i915_ggtt *ggtt)
- {
- struct drm_i915_private *dev_priv = ggtt->base.i915;
- int ret;
- ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
- if (!ret) {
- DRM_ERROR("failed to set up gmch\n");
- return -EIO;
- }
- intel_gtt_get(&ggtt->base.total,
- &ggtt->stolen_size,
- &ggtt->mappable_base,
- &ggtt->mappable_end);
- ggtt->do_idle_maps = needs_idle_maps(dev_priv);
- ggtt->base.insert_page = i915_ggtt_insert_page;
- ggtt->base.insert_entries = i915_ggtt_insert_entries;
- ggtt->base.clear_range = i915_ggtt_clear_range;
- ggtt->base.bind_vma = ggtt_bind_vma;
- ggtt->base.unbind_vma = ggtt_unbind_vma;
- ggtt->base.cleanup = i915_gmch_remove;
- ggtt->invalidate = gmch_ggtt_invalidate;
- if (unlikely(ggtt->do_idle_maps))
- DRM_INFO("applying Ironlake quirks for intel_iommu\n");
- return 0;
- }
- /**
- * i915_ggtt_probe_hw - Probe GGTT hardware location
- * @dev_priv: i915 device
- */
- int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
- {
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- int ret;
- ggtt->base.i915 = dev_priv;
- ggtt->base.dma = &dev_priv->drm.pdev->dev;
- if (INTEL_GEN(dev_priv) <= 5)
- ret = i915_gmch_probe(ggtt);
- else if (INTEL_GEN(dev_priv) < 8)
- ret = gen6_gmch_probe(ggtt);
- else
- ret = gen8_gmch_probe(ggtt);
- if (ret)
- return ret;
- /* Trim the GGTT to fit the GuC mappable upper range (when enabled).
- * This is easier than doing range restriction on the fly, as we
- * currently don't have any bits spare to pass in this upper
- * restriction!
- */
- if (HAS_GUC(dev_priv) && i915.enable_guc_loading) {
- ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
- ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
- }
- if ((ggtt->base.total - 1) >> 32) {
- DRM_ERROR("We never expected a Global GTT with more than 32bits"
- " of address space! Found %lldM!\n",
- ggtt->base.total >> 20);
- ggtt->base.total = 1ULL << 32;
- ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
- }
- if (ggtt->mappable_end > ggtt->base.total) {
- DRM_ERROR("mappable aperture extends past end of GGTT,"
- " aperture=%llx, total=%llx\n",
- ggtt->mappable_end, ggtt->base.total);
- ggtt->mappable_end = ggtt->base.total;
- }
- /* GMADR is the PCI mmio aperture into the global GTT. */
- DRM_INFO("Memory usable by graphics device = %lluM\n",
- ggtt->base.total >> 20);
- DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
- DRM_DEBUG_DRIVER("GTT stolen size = %uM\n", ggtt->stolen_size >> 20);
- if (intel_vtd_active())
- DRM_INFO("VT-d active for gfx access\n");
- return 0;
- }
- /**
- * i915_ggtt_init_hw - Initialize GGTT hardware
- * @dev_priv: i915 device
- */
- int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
- {
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- int ret;
- INIT_LIST_HEAD(&dev_priv->vm_list);
- /* Note that we use page colouring to enforce a guard page at the
- * end of the address space. This is required as the CS may prefetch
- * beyond the end of the batch buffer, across the page boundary,
- * and beyond the end of the GTT if we do not provide a guard.
- */
- mutex_lock(&dev_priv->drm.struct_mutex);
- i915_address_space_init(&ggtt->base, dev_priv, "[global]");
- if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
- ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
- mutex_unlock(&dev_priv->drm.struct_mutex);
- if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
- dev_priv->ggtt.mappable_base,
- dev_priv->ggtt.mappable_end)) {
- ret = -EIO;
- goto out_gtt_cleanup;
- }
- ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, ggtt->mappable_end);
- /*
- * Initialise stolen early so that we may reserve preallocated
- * objects for the BIOS to KMS transition.
- */
- ret = i915_gem_init_stolen(dev_priv);
- if (ret)
- goto out_gtt_cleanup;
- return 0;
- out_gtt_cleanup:
- ggtt->base.cleanup(&ggtt->base);
- return ret;
- }
- int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
- {
- if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
- return -EIO;
- return 0;
- }
- void i915_ggtt_enable_guc(struct drm_i915_private *i915)
- {
- GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate);
- i915->ggtt.invalidate = guc_ggtt_invalidate;
- }
- void i915_ggtt_disable_guc(struct drm_i915_private *i915)
- {
- /* We should only be called after i915_ggtt_enable_guc() */
- GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate);
- i915->ggtt.invalidate = gen6_ggtt_invalidate;
- }
- void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
- {
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- struct drm_i915_gem_object *obj, *on;
- i915_check_and_clear_faults(dev_priv);
- /* First fill our portion of the GTT with scratch pages */
- ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
- ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
- /* clflush objects bound into the GGTT and rebind them. */
- list_for_each_entry_safe(obj, on,
- &dev_priv->mm.bound_list, global_link) {
- bool ggtt_bound = false;
- struct i915_vma *vma;
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (vma->vm != &ggtt->base)
- continue;
- if (!i915_vma_unbind(vma))
- continue;
- WARN_ON(i915_vma_bind(vma, obj->cache_level,
- PIN_UPDATE));
- ggtt_bound = true;
- }
- if (ggtt_bound)
- WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
- }
- ggtt->base.closed = false;
- if (INTEL_GEN(dev_priv) >= 8) {
- if (INTEL_GEN(dev_priv) >= 10)
- cnl_setup_private_ppat(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
- chv_setup_private_ppat(dev_priv);
- else
- bdw_setup_private_ppat(dev_priv);
- return;
- }
- if (USES_PPGTT(dev_priv)) {
- struct i915_address_space *vm;
- list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
- struct i915_hw_ppgtt *ppgtt;
- if (i915_is_ggtt(vm))
- ppgtt = dev_priv->mm.aliasing_ppgtt;
- else
- ppgtt = i915_vm_to_ppgtt(vm);
- gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
- }
- }
- i915_ggtt_invalidate(dev_priv);
- }
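- /* Walk each tile column from the bottom row up, emitting one page-sized
- * DMA entry per tile, to build the rotated (column-major) view.
- */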
- static struct scatterlist *
- rotate_pages(const dma_addr_t *in, unsigned int offset,
- unsigned int width, unsigned int height,
- unsigned int stride,
- struct sg_table *st, struct scatterlist *sg)
- {
- unsigned int column, row;
- unsigned int src_idx;
- for (column = 0; column < width; column++) {
- src_idx = stride * (height - 1) + column;
- for (row = 0; row < height; row++) {
- st->nents++;
- /* We don't need the pages, but need to initialize
- * the entries so the sg list can be happily traversed.
- * All we need are the DMA addresses.
- */
- sg_set_page(sg, NULL, PAGE_SIZE, 0);
- sg_dma_address(sg) = in[offset + src_idx];
- sg_dma_len(sg) = PAGE_SIZE;
- sg = sg_next(sg);
- src_idx -= stride;
- }
- }
- return sg;
- }
- static noinline struct sg_table *
- intel_rotate_pages(struct intel_rotation_info *rot_info,
- struct drm_i915_gem_object *obj)
- {
- const unsigned long n_pages = obj->base.size / PAGE_SIZE;
- unsigned int size = intel_rotation_info_size(rot_info);
- struct sgt_iter sgt_iter;
- dma_addr_t dma_addr;
- unsigned long i;
- dma_addr_t *page_addr_list;
- struct sg_table *st;
- struct scatterlist *sg;
- int ret = -ENOMEM;
- /* Allocate a temporary list of source pages for random access. */
- page_addr_list = kvmalloc_array(n_pages,
- sizeof(dma_addr_t),
- GFP_KERNEL);
- if (!page_addr_list)
- return ERR_PTR(ret);
- /* Allocate target SG list. */
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
- goto err_st_alloc;
- ret = sg_alloc_table(st, size, GFP_KERNEL);
- if (ret)
- goto err_sg_alloc;
- /* Populate source page list from the object. */
- i = 0;
- for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
- page_addr_list[i++] = dma_addr;
- GEM_BUG_ON(i != n_pages);
- st->nents = 0;
- sg = st->sgl;
- for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
- sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
- rot_info->plane[i].width, rot_info->plane[i].height,
- rot_info->plane[i].stride, st, sg);
- }
- DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n",
- obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
- kvfree(page_addr_list);
- return st;
- err_sg_alloc:
- kfree(st);
- err_st_alloc:
- kvfree(page_addr_list);
- DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
- obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
- return ERR_PTR(ret);
- }
- static noinline struct sg_table *
- intel_partial_pages(const struct i915_ggtt_view *view,
- struct drm_i915_gem_object *obj)
- {
- struct sg_table *st;
- struct scatterlist *sg, *iter;
- unsigned int count = view->partial.size;
- unsigned int offset;
- int ret = -ENOMEM;
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
- goto err_st_alloc;
- ret = sg_alloc_table(st, count, GFP_KERNEL);
- if (ret)
- goto err_sg_alloc;
- iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
- GEM_BUG_ON(!iter);
- sg = st->sgl;
- st->nents = 0;
- do {
- unsigned int len;
- len = min(iter->length - (offset << PAGE_SHIFT),
- count << PAGE_SHIFT);
- sg_set_page(sg, NULL, len, 0);
- sg_dma_address(sg) =
- sg_dma_address(iter) + (offset << PAGE_SHIFT);
- sg_dma_len(sg) = len;
- st->nents++;
- count -= len >> PAGE_SHIFT;
- if (count == 0) {
- sg_mark_end(sg);
- return st;
- }
- sg = __sg_next(sg);
- iter = __sg_next(iter);
- offset = 0;
- } while (1);
- err_sg_alloc:
- kfree(st);
- err_st_alloc:
- return ERR_PTR(ret);
- }
- static int
- i915_get_ggtt_vma_pages(struct i915_vma *vma)
- {
- int ret;
- /* The vma->pages are only valid within the lifespan of the borrowed
- * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
- * must be the vma->pages. A simple rule is that vma->pages must only
- * be accessed when the obj->mm.pages are pinned.
- */
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
- switch (vma->ggtt_view.type) {
- case I915_GGTT_VIEW_NORMAL:
- vma->pages = vma->obj->mm.pages;
- return 0;
- case I915_GGTT_VIEW_ROTATED:
- vma->pages =
- intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
- break;
- case I915_GGTT_VIEW_PARTIAL:
- vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
- break;
- default:
- WARN_ONCE(1, "GGTT view %u not implemented!\n",
- vma->ggtt_view.type);
- return -EINVAL;
- }
- ret = 0;
- if (unlikely(IS_ERR(vma->pages))) {
- ret = PTR_ERR(vma->pages);
- vma->pages = NULL;
- DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
- vma->ggtt_view.type, ret);
- }
- return ret;
- }
- /**
- * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
- * @vm: the &struct i915_address_space
- * @node: the &struct drm_mm_node (typically i915_vma.node)
- * @size: how much space to allocate inside the GTT,
- * must be #I915_GTT_PAGE_SIZE aligned
- * @offset: where to insert inside the GTT,
- * must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
- * (@offset + @size) must fit within the address space
- * @color: color to apply to node, if this node is not from a VMA,
- * color must be #I915_COLOR_UNEVICTABLE
- * @flags: control search and eviction behaviour
- *
- * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
- * the address space (using @size and @color). If the @node does not fit, it
- * tries to evict any overlapping nodes from the GTT, including any
- * neighbouring nodes if the colors do not match (to ensure guard pages between
- * differing domains). See i915_gem_evict_for_node() for the gory details
- * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
- * evicting active overlapping objects, and any overlapping node that is pinned
- * or marked as unevictable will also result in failure.
- *
- * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
- * asked to wait for eviction and interrupted.
- */
- int i915_gem_gtt_reserve(struct i915_address_space *vm,
- struct drm_mm_node *node,
- u64 size, u64 offset, unsigned long color,
- unsigned int flags)
- {
- int err;
- GEM_BUG_ON(!size);
- GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
- GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
- GEM_BUG_ON(range_overflows(offset, size, vm->total));
- GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
- GEM_BUG_ON(drm_mm_node_allocated(node));
- node->size = size;
- node->start = offset;
- node->color = color;
- err = drm_mm_reserve_node(&vm->mm, node);
- if (err != -ENOSPC)
- return err;
- if (flags & PIN_NOEVICT)
- return -ENOSPC;
- err = i915_gem_evict_for_node(vm, node, flags);
- if (err == 0)
- err = drm_mm_reserve_node(&vm->mm, node);
- return err;
- }
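- /* Pick a pseudo-random, aligned offset in [start, end - len], used as
- * the single random-replacement candidate when the GTT is full.
- */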
- static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
- {
- u64 range, addr;
- GEM_BUG_ON(range_overflows(start, len, end));
- GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));
- range = round_down(end - len, align) - round_up(start, align);
- if (range) {
- if (sizeof(unsigned long) == sizeof(u64)) {
- addr = get_random_long();
- } else {
- addr = get_random_int();
- if (range > U32_MAX) {
- addr <<= 32;
- addr |= get_random_int();
- }
- }
- div64_u64_rem(addr, range, &addr);
- start += addr;
- }
- return round_up(start, align);
- }
- /**
- * i915_gem_gtt_insert - insert a node into an address_space (GTT)
- * @vm: the &struct i915_address_space
- * @node: the &struct drm_mm_node (typically i915_vma.node)
- * @size: how much space to allocate inside the GTT,
- * must be #I915_GTT_PAGE_SIZE aligned
- * @alignment: required alignment of starting offset, may be 0 but
- * if specified, this must be a power-of-two and at least
- * #I915_GTT_MIN_ALIGNMENT
- * @color: color to apply to node
- * @start: start of any range restriction inside GTT (0 for all),
- * must be #I915_GTT_PAGE_SIZE aligned
- * @end: end of any range restriction inside GTT (U64_MAX for all),
- * must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
- * @flags: control search and eviction behaviour
- *
- * i915_gem_gtt_insert() first searches for an available hole into which
- * it can insert the node. The hole address is aligned to @alignment and
- * its @size must then fit entirely within the [@start, @end] bounds. The
- * nodes on either side of the hole must match @color, or else a guard page
- * will be inserted between the two nodes (or the node evicted). If no
- * suitable hole is found, first a victim is randomly selected and tested
- * for eviction; failing that, the LRU list of objects within the GTT
- * is scanned to find the first set of replacement nodes to create the hole.
- * Those old overlapping nodes are evicted from the GTT (and so must be
- * rebound before any future use). Any node that is currently pinned cannot
- * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
- * active and #PIN_NONBLOCK is specified, that node is also skipped when
- * searching for an eviction candidate. See i915_gem_evict_something() for
- * the gory details on the eviction algorithm.
- *
- * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
- * asked to wait for eviction and interrupted.
- */
- int i915_gem_gtt_insert(struct i915_address_space *vm,
- struct drm_mm_node *node,
- u64 size, u64 alignment, unsigned long color,
- u64 start, u64 end, unsigned int flags)
- {
- enum drm_mm_insert_mode mode;
- u64 offset;
- int err;
- lockdep_assert_held(&vm->i915->drm.struct_mutex);
- GEM_BUG_ON(!size);
- GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
- GEM_BUG_ON(alignment && !is_power_of_2(alignment));
- GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
- GEM_BUG_ON(start >= end);
- GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
- GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
- GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
- GEM_BUG_ON(drm_mm_node_allocated(node));
- if (unlikely(range_overflows(start, size, end)))
- return -ENOSPC;
- if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
- return -ENOSPC;
- mode = DRM_MM_INSERT_BEST;
- if (flags & PIN_HIGH)
- mode = DRM_MM_INSERT_HIGH;
- if (flags & PIN_MAPPABLE)
- mode = DRM_MM_INSERT_LOW;
- /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
- * so we know that we always have a minimum alignment of 4096.
- * The drm_mm range manager is optimised to return results
- * with zero alignment, so where possible use the optimal
- * path.
- */
- BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
- if (alignment <= I915_GTT_MIN_ALIGNMENT)
- alignment = 0;
- err = drm_mm_insert_node_in_range(&vm->mm, node,
- size, alignment, color,
- start, end, mode);
- if (err != -ENOSPC)
- return err;
- if (flags & PIN_NOEVICT)
- return -ENOSPC;
- /* No free space, pick a slot at random.
- *
- * There is a pathological case here using a GTT shared between
- * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
- *
- * |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
- * (64k objects) (448k objects)
- *
- * Now imagine that the eviction LRU is ordered top-down (just because
- * pathology meets real life), and that we need to evict an object to
- * make room inside the aperture. The eviction scan then has to walk
- * the 448k list before it finds one within range. And now imagine that
- * it has to search for a new hole between every byte inside the memcpy,
- * for several simultaneous clients.
- *
- * On a full-ppgtt system, if we have run out of available space, there
- * will be lots and lots of objects in the eviction list! Again,
- * searching that LRU list may be slow if we are also applying any
- * range restrictions (e.g. restriction to low 4GiB) and so, for
- * simplicity and similarity between different GTTs, try the single
- * random replacement first.
- */
- offset = random_offset(start, end,
- size, alignment ?: I915_GTT_MIN_ALIGNMENT);
- err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
- if (err != -ENOSPC)
- return err;
- /* Randomly selected placement is pinned, do a search */
- err = i915_gem_evict_something(vm, size, alignment, color,
- start, end, flags);
- if (err)
- return err;
- return drm_mm_insert_node_in_range(&vm->mm, node,
- size, alignment, color,
- start, end, DRM_MM_INSERT_EVICT);
- }
- #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
- #include "selftests/mock_gtt.c"
- #include "selftests/i915_gem_gtt.c"
- #endif