/**************************************************************************
 *
 * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

#define VMW_RES_HT_ORDER 12

/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id
 * after validation is -1, the command is replaced with a NOP. Otherwise no
 * action.
 */
enum vmw_resource_relocation_type {
	vmw_res_rel_normal,
	vmw_res_rel_nop,
	vmw_res_rel_cond_nop,
	vmw_res_rel_max
};

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of single byte entries into the command buffer where the
 * id that needs fixup is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	u32 offset:29;
	enum vmw_resource_relocation_type rel_type:3;
};

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @staged_bindings: If @res is a context, tracks bindings set up during
 * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @switching_backup: The command stream provides a new backup buffer for a
 * resource.
 * @no_buffer_needed: This means @switching_backup is true on first buffer
 * reference. So resource reservation does not need to allocate a backup
 * buffer for the resource.
 */
struct vmw_resource_val_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_dma_buffer *new_backup;
	struct vmw_ctx_binding_state *staged_bindings;
	unsigned long new_backup_offset;
	u32 first_usage : 1;
	u32 switching_backup : 1;
	u32 no_buffer_needed : 1;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable)}
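
/*
 * Illustrative sketch (not part of the driver): VMW_CMD_DEF builds entries
 * of the command verifier table indexed by SVGA3D command id. The entries
 * below are hypothetical examples of the pattern used later in this file,
 * assuming a table of struct vmw_cmd_entry:
 *
 *	static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 *		VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY,
 *			    &vmw_cmd_surface_copy_check, true, false, false),
 *		VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
 *			    &vmw_cmd_set_render_target_check, true, false,
 *			    false),
 *	};
 */
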
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_dma_buffer **vmw_bo_p);
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct vmw_dma_buffer *vbo,
				   bool validate_as_mob,
				   uint32_t *p_val_node);

/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
	return (unsigned long) b - (unsigned long) a;
}
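
/*
 * Illustrative sketch (not part of the driver): vmw_ptr_diff() is what ties
 * a patch location to the start of the command buffer. Assuming buf_start
 * points at the batch and id_loc at a resource id inside it (both names as
 * used elsewhere in this file):
 *
 *	u32 *id_loc;		(location of an id inside the batch)
 *	size_t offset = vmw_ptr_diff(sw_context->buf_start, id_loc);
 *
 * The offset is later stored in a struct vmw_resource_relocation.
 */
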
/**
 * vmw_resources_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @sw_context: pointer to the software context
 * @backoff: Whether command submission failed.
 */
static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
				    bool backoff)
{
	struct vmw_resource_val_node *val;
	struct list_head *list = &sw_context->resource_list;

	if (sw_context->dx_query_mob && !backoff)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);

	list_for_each_entry(val, list, head) {
		struct vmw_resource *res = val->res;
		bool switch_backup =
			(backoff) ? false : val->switching_backup;

		/*
		 * Transfer staged context bindings to the
		 * persistent context binding tracker.
		 */
		if (unlikely(val->staged_bindings)) {
			if (!backoff) {
				vmw_binding_state_commit
					(vmw_context_binding_state(val->res),
					 val->staged_bindings);
			}

			if (val->staged_bindings != sw_context->staged_bindings)
				vmw_binding_state_free(val->staged_bindings);
			else
				sw_context->staged_bindings_inuse = false;
			val->staged_bindings = NULL;
		}
		vmw_resource_unreserve(res, switch_backup, val->new_backup,
				       val->new_backup_offset);
		vmw_dmabuf_unreference(&val->new_backup);
	}
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
 * added to the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The validation context.
 * @node: The validation node holding this context.
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource_val_node *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings =
			vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged_bindings)) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = PTR_ERR(node->staged_bindings);
			node->staged_bindings = NULL;
			goto out_err;
		}
	} else {
		node->staged_bindings = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	return 0;
out_err:
	return ret;
}

/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *res,
				struct vmw_resource_val_node **p_node)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_resource_val_node *node;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
				    &hash) == 0)) {
		node = container_of(hash, struct vmw_resource_val_node, hash);
		node->first_usage = false;
		if (unlikely(p_node != NULL))
			*p_node = node;
		return 0;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (unlikely(!node)) {
		DRM_ERROR("Failed to allocate a resource validation "
			  "entry.\n");
		return -ENOMEM;
	}

	node->hash.key = (unsigned long) res;
	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to initialize a resource validation "
			  "entry.\n");
		kfree(node);
		return ret;
	}
	node->res = vmw_resource_reference(res);
	node->first_usage = true;
	if (unlikely(p_node != NULL))
		*p_node = node;

	if (!dev_priv->has_mob) {
		list_add_tail(&node->head, &sw_context->resource_list);
		return 0;
	}

	switch (vmw_res_type(res)) {
	case vmw_res_context:
	case vmw_res_dx_context:
		list_add(&node->head, &sw_context->ctx_resource_list);
		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
		break;
	case vmw_res_cotable:
		list_add_tail(&node->head, &sw_context->ctx_resource_list);
		break;
	default:
		list_add_tail(&node->head, &sw_context->resource_list);
		break;
	}

	return ret;
}

/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to
 * to the validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise
	 * it may be swapped out when the view is validated.
	 */
	ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
	if (ret)
		return ret;

	return vmw_resource_val_add(sw_context, view, NULL);
}

/**
 * vmw_view_id_val_add - Look up a view and add it, and the surface it's
 * pointing to, to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on,
 * or scheduled for creation on. If there is no DX context set, the function
 * will return -EINVAL. Otherwise returns 0 on success and -EINVAL on failure.
 */
static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
			       enum vmw_view_type view_type, u32 id)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return PTR_ERR(view);

	ret = vmw_view_res_val_add(sw_context, view);
	vmw_resource_unreference(&view);

	return ret;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context
 * on the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;

	/* Add all cotables to the validation list. */
	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_resource_val_add(sw_context, res, NULL);
			vmw_resource_unreference(&res);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		/* entry->res is not refcounted */
		res = vmw_resource_reference_unless_doomed(entry->res);
		if (unlikely(res == NULL))
			continue;

		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_resource_val_add(sw_context, entry->res,
						   NULL);
		vmw_resource_unreference(&res);
		if (unlikely(ret != 0))
			break;
	}

	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_dma_buffer *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob)
			ret = vmw_bo_to_validate_list(sw_context,
						      dx_query_mob,
						      true, NULL);
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
static int vmw_resource_relocation_add(struct list_head *list,
				       const struct vmw_resource *res,
				       unsigned long offset,
				       enum vmw_resource_relocation_type
				       rel_type)
{
	struct vmw_resource_relocation *rel;

	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
	if (unlikely(!rel)) {
		DRM_ERROR("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	rel->rel_type = rel_type;
	list_add_tail(&rel->head, list);

	return 0;
}
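
/*
 * Illustrative sketch (not part of the driver): a verifier that wants the
 * device-side id patched in after validation registers a relocation at the
 * id's byte offset. The variable id_loc below is hypothetical:
 *
 *	ret = vmw_resource_relocation_add(&sw_context->res_relocations, res,
 *					  vmw_ptr_diff(sw_context->buf_start,
 *						       id_loc),
 *					  vmw_res_rel_normal);
 *
 * Passing vmw_res_rel_cond_nop instead would have the command turned into
 * a NOP if the resource ends up with id -1 after validation.
 */
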
/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	struct vmw_resource_relocation *rel, *n;

	list_for_each_entry_safe(rel, n, list, head) {
		list_del(&rel->head);
		kfree(rel);
	}
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	/* Validate the struct vmw_resource_relocation member size */
	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

	list_for_each_entry(rel, list, head) {
		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
		switch (rel->rel_type) {
		case vmw_res_rel_normal:
			*addr = rel->res->id;
			break;
		case vmw_res_rel_nop:
			*addr = SVGA_3D_CMD_NOP;
			break;
		default:
			if (rel->res->id == -1)
				*addr = SVGA_3D_CMD_NOP;
			break;
		}
	}
}

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @vbo: The buffer object to add.
 * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct vmw_dma_buffer *vbo,
				   bool validate_as_mob,
				   uint32_t *p_val_node)
{
	uint32_t val_node;
	struct vmw_validate_buffer *vval_buf;
	struct ttm_validate_buffer *val_buf;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
				    &hash) == 0)) {
		vval_buf = container_of(hash, struct vmw_validate_buffer,
					hash);
		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
			DRM_ERROR("Inconsistent buffer usage.\n");
			return -EINVAL;
		}
		val_buf = &vval_buf->base;
		val_node = vval_buf - sw_context->val_bufs;
	} else {
		val_node = sw_context->cur_val_buf;
		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
			DRM_ERROR("Max number of DMA buffers per submission "
				  "exceeded.\n");
			return -EINVAL;
		}
		vval_buf = &sw_context->val_bufs[val_node];
		vval_buf->hash.key = (unsigned long) vbo;
		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to initialize a buffer validation "
				  "entry.\n");
			return ret;
		}
		++sw_context->cur_val_buf;
		val_buf = &vval_buf->base;
		val_buf->bo = ttm_bo_reference(&vbo->base);
		val_buf->shared = false;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		vval_buf->validate_as_mob = validate_as_mob;
	}

	if (p_val_node)
		*p_val_node = val_node;

	return 0;
}
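
/*
 * Illustrative sketch (not part of the driver): a typical caller adds a
 * buffer that backs a MOB and optionally records its validate-node index
 * for a later relocation. The variables below are hypothetical:
 *
 *	uint32_t index;
 *
 *	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &index);
 *	if (unlikely(ret != 0))
 *		return ret;
 *
 * Adding the same buffer twice is harmless; the hash-table lookup returns
 * the existing validate node.
 */
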
/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since vmware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret = 0;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
		if (unlikely(ret != 0))
			return ret;

		if (res->backup) {
			struct vmw_dma_buffer *vbo = res->backup;

			ret = vmw_bo_to_validate_list
				(sw_context, vbo,
				 vmw_resource_needs_backup(res), NULL);

			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (sw_context->dx_query_mob) {
		struct vmw_dma_buffer *expected_dx_query_mob;

		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_dma_buffer *backup = res->backup;

		ret = vmw_resource_validate(res);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}

		/* Check if the resource switched backup buffer */
		if (backup && res->backup && (backup != res->backup)) {
			struct vmw_dma_buffer *vbo = res->backup;

			ret = vmw_bo_to_validate_list
				(sw_context, vbo,
				 vmw_resource_needs_backup(res), NULL);
			if (ret) {
				ttm_bo_unreserve(&vbo->base);
				return ret;
			}
		}
	}
	return 0;
}

/**
 * vmw_cmd_res_reloc_add - Add a resource to a software context's
 * relocation- and validation lists.
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @sw_context: Pointer to the software context.
 * @id_loc: Pointer to where the id that needs translation is located.
 * @res: Valid pointer to a struct vmw_resource.
 * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
 * used for this resource is returned here.
 */
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 uint32_t *id_loc,
				 struct vmw_resource *res,
				 struct vmw_resource_val_node **p_val)
{
	int ret;
	struct vmw_resource_val_node *node;

	*p_val = NULL;
	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
					  res,
					  vmw_ptr_diff(sw_context->buf_start,
						       id_loc),
					  vmw_res_rel_normal);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_resource_val_add(sw_context, res, &node);
	if (unlikely(ret != 0))
		return ret;

	if (p_val)
		*p_val = node;

	return 0;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: Pointer to pointer to resource validation node. Populated
 * on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource_val_node **p_val)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[res_type];
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
	int ret;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (p_val)
			*p_val = NULL;
		if (res_type == vmw_res_context) {
			DRM_ERROR("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	/*
	 * Fastpath in case of repeated commands referencing the same
	 * resource
	 */

	if (likely(rcache->valid && *id_loc == rcache->handle)) {
		const struct vmw_resource *res = rcache->res;

		rcache->node->first_usage = false;
		if (p_val)
			*p_val = rcache->node;

		return vmw_resource_relocation_add
			(&sw_context->res_relocations, res,
			 vmw_ptr_diff(sw_context->buf_start, id_loc),
			 vmw_res_rel_normal);
	}

	ret = vmw_user_resource_lookup_handle(dev_priv,
					      sw_context->fp->tfile,
					      *id_loc,
					      converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use resource 0x%08x.\n",
			  (unsigned) *id_loc);
		dump_stack();
		return ret;
	}

	rcache->valid = true;
	rcache->res = res;
	rcache->handle = *id_loc;

	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
				    res, &node);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	rcache->node = node;
	if (p_val)
		*p_val = node;
	vmw_resource_unreference(&res);
	return 0;

out_no_reloc:
	BUG_ON(sw_context->error_resource != NULL);
	sw_context->error_resource = res;

	return ret;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX queries associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
	struct vmw_private *dev_priv = ctx_res->dev_priv;
	struct vmw_dma_buffer *dx_query_mob;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindAllQuery body;
	} *cmd;

	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
		return 0;

	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);

	if (cmd == NULL) {
		DRM_ERROR("Failed to rebind queries.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = ctx_res->id;
	cmd->body.mobid = dx_query_mob->base.mem.start;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	vmw_context_bind_dx_query(ctx_res, dx_query_mob);

	return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to
 * referenced contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		if (unlikely(!val->staged_bindings))
			break;

		ret = vmw_binding_rebind_all
			(vmw_context_binding_state(val->res));
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to rebind context.\n");
			return ret;
		}

		ret = vmw_rebind_all_dx_query(val->res);
		if (ret != 0)
			return ret;
	}

	return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context
 * binding state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_cmdbuf_res_manager *man;
	u32 i;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	man = sw_context->man;
	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_lookup(man, view_type, view_ids[i]);
			if (IS_ERR(view)) {
				DRM_ERROR("View not found.\n");
				return PTR_ERR(view);
			}

			ret = vmw_view_res_val_add(sw_context, view);
			if (ret) {
				DRM_ERROR("Could not add view to "
					  "validation list.\n");
				vmw_resource_unreference(&view);
				return ret;
			}
		}
		binding.bi.ctx = ctx_node->res;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
				shader_slot, binding.slot);
		if (view)
			vmw_resource_unreference(&view);
	}

	return 0;
}
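
/*
 * Illustrative sketch (not part of the driver): a DX set-shader-resources
 * style verifier hands the array of view ids that trails the command body
 * to vmw_view_bindings_add(). The command layout and counts below are
 * hypothetical:
 *
 *	ret = vmw_view_bindings_add(sw_context, vmw_view_sr,
 *				    vmw_ctx_binding_sr,
 *				    cmd->body.type - SVGA3D_SHADERTYPE_MIN,
 *				    (uint32 *)&cmd[1], num_view_ids, 0);
 */
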
  837. /**
  838. * vmw_cmd_cid_check - Check a command header for valid context information.
  839. *
  840. * @dev_priv: Pointer to a device private structure.
  841. * @sw_context: Pointer to the software context.
  842. * @header: A command header with an embedded user-space context handle.
  843. *
  844. * Convenience function: Call vmw_cmd_res_check with the user-space context
  845. * handle embedded in @header.
  846. */
  847. static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
  848. struct vmw_sw_context *sw_context,
  849. SVGA3dCmdHeader *header)
  850. {
  851. struct vmw_cid_cmd {
  852. SVGA3dCmdHeader header;
  853. uint32_t cid;
  854. } *cmd;
  855. cmd = container_of(header, struct vmw_cid_cmd, header);
  856. return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
  857. user_context_converter, &cmd->cid, NULL);
  858. }
  859. static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
  860. struct vmw_sw_context *sw_context,
  861. SVGA3dCmdHeader *header)
  862. {
  863. struct vmw_sid_cmd {
  864. SVGA3dCmdHeader header;
  865. SVGA3dCmdSetRenderTarget body;
  866. } *cmd;
  867. struct vmw_resource_val_node *ctx_node;
  868. struct vmw_resource_val_node *res_node;
  869. int ret;
  870. cmd = container_of(header, struct vmw_sid_cmd, header);
  871. if (cmd->body.type >= SVGA3D_RT_MAX) {
  872. DRM_ERROR("Illegal render target type %u.\n",
  873. (unsigned) cmd->body.type);
  874. return -EINVAL;
  875. }
  876. ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
  877. user_context_converter, &cmd->body.cid,
  878. &ctx_node);
  879. if (unlikely(ret != 0))
  880. return ret;
  881. ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
  882. user_surface_converter,
  883. &cmd->body.target.sid, &res_node);
  884. if (unlikely(ret != 0))
  885. return ret;
  886. if (dev_priv->has_mob) {
  887. struct vmw_ctx_bindinfo_view binding;
  888. binding.bi.ctx = ctx_node->res;
  889. binding.bi.res = res_node ? res_node->res : NULL;
  890. binding.bi.bt = vmw_ctx_binding_rt;
  891. binding.slot = cmd->body.type;
  892. vmw_binding_add(ctx_node->staged_bindings,
  893. &binding.bi, 0, binding.slot);
  894. }
  895. return 0;
  896. }
  897. static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
  898. struct vmw_sw_context *sw_context,
  899. SVGA3dCmdHeader *header)
  900. {
  901. struct vmw_sid_cmd {
  902. SVGA3dCmdHeader header;
  903. SVGA3dCmdSurfaceCopy body;
  904. } *cmd;
  905. int ret;
  906. cmd = container_of(header, struct vmw_sid_cmd, header);
  907. ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
  908. user_surface_converter,
  909. &cmd->body.src.sid, NULL);
  910. if (ret)
  911. return ret;
  912. return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
  913. user_surface_converter,
  914. &cmd->body.dest.sid, NULL);
  915. }
  916. static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
  917. struct vmw_sw_context *sw_context,
  918. SVGA3dCmdHeader *header)
  919. {
  920. struct {
  921. SVGA3dCmdHeader header;
  922. SVGA3dCmdDXBufferCopy body;
  923. } *cmd;
  924. int ret;
  925. cmd = container_of(header, typeof(*cmd), header);
  926. ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
  927. user_surface_converter,
  928. &cmd->body.src, NULL);
  929. if (ret != 0)
  930. return ret;
  931. return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
  932. user_surface_converter,
  933. &cmd->body.dest, NULL);
  934. }
  935. static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
  936. struct vmw_sw_context *sw_context,
  937. SVGA3dCmdHeader *header)
  938. {
  939. struct {
  940. SVGA3dCmdHeader header;
  941. SVGA3dCmdDXPredCopyRegion body;
  942. } *cmd;
  943. int ret;
  944. cmd = container_of(header, typeof(*cmd), header);
  945. ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
  946. user_surface_converter,
  947. &cmd->body.srcSid, NULL);
  948. if (ret != 0)
  949. return ret;
  950. return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
  951. user_surface_converter,
  952. &cmd->body.dstSid, NULL);
  953. }
  954. static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
  955. struct vmw_sw_context *sw_context,
  956. SVGA3dCmdHeader *header)
  957. {
  958. struct vmw_sid_cmd {
  959. SVGA3dCmdHeader header;
  960. SVGA3dCmdSurfaceStretchBlt body;
  961. } *cmd;
  962. int ret;
  963. cmd = container_of(header, struct vmw_sid_cmd, header);
  964. ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
  965. user_surface_converter,
  966. &cmd->body.src.sid, NULL);
  967. if (unlikely(ret != 0))
  968. return ret;
  969. return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
  970. user_surface_converter,
  971. &cmd->body.dest.sid, NULL);
  972. }
  973. static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
  974. struct vmw_sw_context *sw_context,
  975. SVGA3dCmdHeader *header)
  976. {
  977. struct vmw_sid_cmd {
  978. SVGA3dCmdHeader header;
  979. SVGA3dCmdBlitSurfaceToScreen body;
  980. } *cmd;
  981. cmd = container_of(header, struct vmw_sid_cmd, header);
  982. return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
  983. user_surface_converter,
  984. &cmd->body.srcImage.sid, NULL);
  985. }
  986. static int vmw_cmd_present_check(struct vmw_private *dev_priv,
  987. struct vmw_sw_context *sw_context,
  988. SVGA3dCmdHeader *header)
  989. {
  990. struct vmw_sid_cmd {
  991. SVGA3dCmdHeader header;
  992. SVGA3dCmdPresent body;
  993. } *cmd;
  994. cmd = container_of(header, struct vmw_sid_cmd, header);
  995. return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
  996. user_surface_converter, &cmd->body.sid,
  997. NULL);
  998. }
  999. /**
  1000. * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
  1001. *
  1002. * @dev_priv: The device private structure.
  1003. * @new_query_bo: The new buffer holding query results.
  1004. * @sw_context: The software context used for this command submission.
  1005. *
  1006. * This function checks whether @new_query_bo is suitable for holding
  1007. * query results, and if another buffer currently is pinned for query
  1008. * results. If so, the function prepares the state of @sw_context for
  1009. * switching pinned buffers after successful submission of the current
  1010. * command batch.
  1011. */
  1012. static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
  1013. struct vmw_dma_buffer *new_query_bo,
  1014. struct vmw_sw_context *sw_context)
  1015. {
  1016. struct vmw_res_cache_entry *ctx_entry =
  1017. &sw_context->res_cache[vmw_res_context];
  1018. int ret;
  1019. BUG_ON(!ctx_entry->valid);
  1020. sw_context->last_query_ctx = ctx_entry->res;
  1021. if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
  1022. if (unlikely(new_query_bo->base.num_pages > 4)) {
  1023. DRM_ERROR("Query buffer too large.\n");
  1024. return -EINVAL;
  1025. }
  1026. if (unlikely(sw_context->cur_query_bo != NULL)) {
  1027. sw_context->needs_post_query_barrier = true;
  1028. ret = vmw_bo_to_validate_list(sw_context,
  1029. sw_context->cur_query_bo,
  1030. dev_priv->has_mob, NULL);
  1031. if (unlikely(ret != 0))
  1032. return ret;
  1033. }
  1034. sw_context->cur_query_bo = new_query_bo;
  1035. ret = vmw_bo_to_validate_list(sw_context,
  1036. dev_priv->dummy_query_bo,
  1037. dev_priv->has_mob, NULL);
  1038. if (unlikely(ret != 0))
  1039. return ret;
  1040. }
  1041. return 0;
  1042. }

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and the old query buffers need to be
 * fenced using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */
	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
		if (unlikely(ret != 0))
			DRM_ERROR("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_dmabuf_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting
			 * dummy queries in context destroy paths.
			 */
			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_dmabuf_reference(sw_context->cur_query_bo);
		}
	}
}
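
/*
 * Illustrative sketch (not a definitive call graph): the two helpers above
 * are expected to pair up around command submission roughly as follows,
 * with the fence emitted only after the commit so that both the old and
 * the new query buffer are covered by it:
 *
 *	ret = vmw_query_bo_switch_prepare(dev_priv, new_query_bo, sw_context);
 *	...				// validate and submit the batch
 *	vmw_query_bo_switch_commit(dev_priv, sw_context);
 *	...				// emit the fence sequence last
 */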

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations().
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
				     NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use MOB buffer.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number of relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->mob_loc = id;
	reloc->location = NULL;

	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}
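
/*
 * Deferred-patching sketch, under the assumptions of this file: once the
 * whole batch has validated, vmw_apply_relocations() walks
 * sw_context->relocs and rewrites each recorded location with the id that
 * validation resolved. Conceptually:
 *
 *	for (i = 0; i < sw_context->cur_reloc; ++i) {
 *		reloc = &sw_context->relocs[i];
 *		if (reloc->mob_loc)		// MOB relocation from above
 *			*reloc->mob_loc = validated_mob_id;
 *	}
 *
 * "validated_mob_id" is a placeholder for whatever the validation step
 * produces, not an identifier used elsewhere in this file.
 */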

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations().
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
				     NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number of relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}
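
/*
 * Note the difference between the two translation helpers above: a MOB
 * relocation records its target through reloc->mob_loc and patches a bare
 * SVGAMobId, while a guest-pointer relocation records the whole
 * SVGAGuestPtr through reloc->location so both gmrId and offset can be
 * rewritten at apply time.
 */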

/**
 * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_dx_define_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineQuery q;
	} *cmd;
	int ret;
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *cotable_res;

	if (ctx_node == NULL) {
		DRM_ERROR("DX Context not set for query.\n");
		return -EINVAL;
	}

	cmd = container_of(header, struct vmw_dx_define_query_cmd, header);

	if (cmd->q.type < SVGA3D_QUERYTYPE_MIN ||
	    cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
	ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
	vmw_resource_unreference(&cotable_res);

	return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID
 * with its backing MOB. In this function, we take the user mode
 * MOB ID and use vmw_translate_mob_ptr() to translate it to its
 * kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dx_bind_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindQuery q;
	} *cmd;
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);

	/*
	 * Look up the buffer pointed to by q.mobid, put it on the relocation
	 * list so its kernel mode MOB ID can be filled in later
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
				    &vmw_bo);
	if (ret != 0)
		return ret;

	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}

/**
 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_begin_gb_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginGBQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
			   header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	struct vmw_begin_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_query_cmd,
			   header);

	if (unlikely(dev_priv->has_mob)) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdBeginGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}
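
/*
 * Promotion sketch: on guest-backed hardware the legacy query commands are
 * rewritten in place into their GB equivalents and then revalidated, which
 * is why the two command structs must be exactly the same size (the BUG_ON
 * above). Schematically, for SVGA_3D_CMD_BEGIN_QUERY:
 *
 *	BEGIN_QUERY    { header, cid, type }
 *	            -> BEGIN_GB_QUERY { header (id rewritten), cid, type }
 *
 * The END and WAIT variants below use the same in-place pattern, additionally
 * mapping guestResult.gmrId/offset onto mobid/offset.
 */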

/**
 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}

/**
 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdEndGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return 0;
}

/**
 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdWaitForGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		DRM_ERROR("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
		DRM_ERROR("Invalid DMA offset.\n");
		/* Drop the reference taken by vmw_translate_guest_ptr(). */
		ret = -EINVAL;
		goto out_no_surface;
	}

	bo_size -= cmd->dma.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter, &cmd->dma.host.sid,
				NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			DRM_ERROR("Could not find surface for DMA.\n");
		goto out_no_surface;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
			     header);

out_no_surface:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}
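
/*
 * Layout sketch for the checks above (sizes in bytes; illustrative, not a
 * spec excerpt):
 *
 *	|<--------------- header->size ---------------->|
 *	| SVGA3dCmdSurfaceDMA | copy boxes ... | suffix  |
 *	^ &cmd->dma                            ^ suffix = &cmd->dma +
 *	                                         header->size - sizeof(*suffix)
 *
 * Clamping suffix->maximumOffset to the validated BO size keeps the device
 * from reading or writing past the end of the guest buffer.
 */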

static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}
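
/*
 * Stream layout assumed by vmw_cmd_draw() (sketch):
 *
 *	SVGA3dCmdDrawPrimitives body;
 *	SVGA3dVertexDecl        decl[body.numVertexDecls];
 *	SVGA3dPrimitiveRange    range[body.numRanges];
 *
 * Both array lengths are validated against header->size before any of the
 * surface IDs they contain are looked up.
 */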

static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	} *cmd;

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
	  ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_tex_state_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->state.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
			DRM_ERROR("Illegal texture/sampler unit %u.\n",
				  (unsigned) cur_state->stage);
			return -EINVAL;
		}

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cur_state->value, &res_node);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo_tex binding;

			binding.bi.ctx = ctx_node->res;
			binding.bi.res = res_node ? res_node->res : NULL;
			binding.bi.bt = vmw_ctx_binding_tex;
			binding.texture_stage = cur_state->stage;
			vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
					0, binding.texture_stage);
		}
	}

	return 0;
}

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}

/**
 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
 * switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @val_node: The validation node representing the resource.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving.
 */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     struct vmw_resource_val_node *val_node,
				     uint32_t *buf_id,
				     unsigned long backup_offset)
{
	struct vmw_dma_buffer *dma_buf;
	int ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
	if (ret)
		return ret;

	val_node->switching_backup = true;
	if (val_node->first_usage)
		val_node->no_buffer_needed = true;

	vmw_dmabuf_unreference(&val_node->new_backup);
	val_node->new_backup = dma_buf;
	val_node->new_backup_offset = backup_offset;

	return 0;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving. It's basically a wrapper
 * around vmw_cmd_res_switch_backup with a different interface.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter,
				 uint32_t *res_id,
				 uint32_t *buf_id,
				 unsigned long backup_offset)
{
	struct vmw_resource_val_node *val_node;
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				converter, res_id, &val_node);
	if (ret)
		return ret;

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
					 buf_id, backup_offset);
}
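
/*
 * Usage sketch: the SVGA_3D_CMD_BIND_GB_* validators below are thin callers
 * of this helper. For example, binding a guest-backed surface to its MOB
 * reduces to a single call (see vmw_cmd_bind_gb_surface()):
 *
 *	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
 *				     user_surface_converter,
 *				     &cmd->body.sid, &cmd->body.mobid, 0);
 */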

/**
 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter,
				     &cmd->body.sid, &cmd->body.mobid,
				     0);
}

/**
 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate an
 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_shader_define_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineShader body;
	} *cmd;
	int ret;
	size_t size;
	struct vmw_resource_val_node *val;

	cmd = container_of(header, struct vmw_shader_define_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&val);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	size = cmd->header.size - sizeof(cmd->body);
	ret = vmw_compat_shader_add(dev_priv,
				    vmw_context_res_man(val->res),
				    cmd->body.shid, cmd + 1,
				    cmd->body.type, size,
				    &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(&sw_context->res_relocations,
					   NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_shader_destroy_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyShader body;
	} *cmd;
	int ret;
	struct vmw_resource_val_node *val;

	cmd = container_of(header, struct vmw_shader_destroy_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&val);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	ret = vmw_shader_remove(vmw_context_res_man(val->res),
				cmd->body.shid,
				cmd->body.type,
				&sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(&sw_context->res_relocations,
					   NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node, *res_node = NULL;
	struct vmw_ctx_bindinfo_shader binding;
	struct vmw_resource *res = NULL;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_cmd,
			   header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
		DRM_ERROR("Illegal shader type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	if (!dev_priv->has_mob)
		return 0;

	if (cmd->body.shid != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
					cmd->body.shid,
					cmd->body.type);

		if (!IS_ERR(res)) {
			ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
						    &cmd->body.shid, res,
						    &res_node);
			vmw_resource_unreference(&res);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (!res_node) {
		ret = vmw_cmd_res_check(dev_priv, sw_context,
					vmw_res_shader,
					user_shader_converter,
					&cmd->body.shid, &res_node);
		if (unlikely(ret != 0))
			return ret;
	}

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = res_node ? res_node->res : NULL;
	binding.bi.bt = vmw_ctx_binding_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
			binding.shader_slot, 0);
	return 0;
}

/**
 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_const_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShaderConst body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_const_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				NULL);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}

/**
 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
			   header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter,
				     &cmd->body.shid, &cmd->body.mobid,
				     cmd->body.offsetInBytes);
}

/**
 * vmw_cmd_dx_set_single_constant_buffer - Validate an
 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSingleConstantBuffer body;
	} *cmd;
	struct vmw_resource_val_node *res_node = NULL;
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_cb binding;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = res_node ? res_node->res : NULL;
	binding.bi.bt = vmw_ctx_binding_cb;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	binding.offset = cmd->body.offsetInBytes;
	binding.size = cmd->body.sizeInBytes;
	binding.slot = cmd->body.slot;

	if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
			  (unsigned) cmd->body.type,
			  (unsigned) binding.slot);
		return -EINVAL;
	}

	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
			binding.shader_slot, binding.slot);

	return 0;
}
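
/*
 * Binding-tracking sketch (illustrative, not authoritative): each DX
 * validator records what was bound where in a vmw_ctx_bindinfo_* struct
 * and hands it to vmw_binding_add(), keyed by shader slot and binding
 * slot, so the binding can be re-emitted when a swapped-out context is
 * brought back in. For a constant buffer the tracked state is:
 *
 *	binding.bi.{ctx, res, bt}			// who and what
 *	binding.{shader_slot, slot, offset, size}	// where and how much
 *
 * (Field-by-field assignment as in the function above; the brace notation
 * here is shorthand, not initializer syntax for these structs.)
 */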

/**
 * vmw_cmd_dx_set_shader_res - Validate an
 * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetShaderResources body;
	} *cmd = container_of(header, typeof(*cmd), header);
	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dShaderResourceViewId);

	if ((u64) cmd->body.startView + (u64) num_sr_view >
	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
	    cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
		DRM_ERROR("Invalid shader binding.\n");
		return -EINVAL;
	}

	return vmw_view_bindings_add(sw_context, vmw_view_sr,
				     vmw_ctx_binding_sr,
				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
				     (void *) &cmd[1], num_sr_view,
				     cmd->body.startView);
}

/**
 * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetShader body;
	} *cmd;
	struct vmw_resource *res = NULL;
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_shader binding;
	int ret = 0;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
		DRM_ERROR("Illegal shader type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
		if (IS_ERR(res)) {
			DRM_ERROR("Could not find shader for binding.\n");
			return PTR_ERR(res);
		}

		ret = vmw_resource_val_add(sw_context, res, NULL);
		if (ret)
			goto out_unref;
	}

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_dx_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;

	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
			binding.shader_slot, 0);
out_unref:
	if (res)
		vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_cmd_dx_set_vertex_buffers - Validates an
 * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_vb binding;
	struct vmw_resource_val_node *res_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetVertexBuffers body;
		SVGA3dVertexBuffer buf[];
	} *cmd;
	int i, ret, num;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dVertexBuffer);
	if ((u64)num + (u64)cmd->body.startBuffer >
	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
		DRM_ERROR("Invalid number of vertex buffers.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cmd->buf[i].sid, &res_node);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->res;
		binding.bi.bt = vmw_ctx_binding_vb;
		binding.bi.res = ((res_node) ? res_node->res : NULL);
		binding.offset = cmd->buf[i].offset;
		binding.stride = cmd->buf[i].stride;
		binding.slot = i + cmd->body.startBuffer;

		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
				0, binding.slot);
	}

	return 0;
}

/**
 * vmw_cmd_dx_set_index_buffer - Validate an
 * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_ib binding;
	struct vmw_resource_val_node *res_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetIndexBuffer body;
	} *cmd;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->res;
	binding.bi.res = ((res_node) ? res_node->res : NULL);
	binding.bi.bt = vmw_ctx_binding_ib;
	binding.offset = cmd->body.offset;
	binding.format = cmd->body.format;

	vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_rendertargets - Validate an
 * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetRenderTargets body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;
	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dRenderTargetViewId);

	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
		DRM_ERROR("Invalid DX Rendertarget binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
				    vmw_ctx_binding_ds, 0,
				    &cmd->body.depthStencilViewId, 1, 0);
	if (ret)
		return ret;

	return vmw_view_bindings_add(sw_context, vmw_view_rt,
				     vmw_ctx_binding_dx_rt, 0,
				     (void *)&cmd[1], num_rt_view, 0);
}

/**
 * vmw_cmd_dx_clear_rendertarget_view - Validate an
 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearRenderTargetView body;
	} *cmd = container_of(header, typeof(*cmd), header);

	return vmw_view_id_val_add(sw_context, vmw_view_rt,
				   cmd->body.renderTargetViewId);
}

/**
 * vmw_cmd_dx_clear_depthstencil_view - Validate an
 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearDepthStencilView body;
	} *cmd = container_of(header, typeof(*cmd), header);

	return vmw_view_id_val_add(sw_context, vmw_view_ds,
				   cmd->body.depthStencilViewId);
}

static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource_val_node *srf_node;
	struct vmw_resource *res;
	enum vmw_view_type view_type;
	int ret;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
		uint32 sid;
	} *cmd;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	view_type = vmw_view_cmd_to_type(header->id);
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->sid, &srf_node);
	if (unlikely(ret != 0))
		return ret;

	res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	vmw_resource_unreference(&res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_view_add(sw_context->man,
			    ctx_node->res,
			    srf_node->res,
			    view_type,
			    cmd->defined_id,
			    header,
			    header->size + sizeof(*header),
			    &sw_context->staged_cmd_res);
}
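
/*
 * Illustrative note: every SVGA_3D_CMD_DX_DEFINE_*_VIEW command starts
 * with the same two dwords,
 *
 *	uint32 defined_id;	// id of the view being defined
 *	uint32 sid;		// backing surface id
 *
 * which is what lets vmw_cmd_dx_view_define() validate all of them with a
 * single struct overlay keyed off vmw_view_cmd_to_type(header->id).
 */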

/**
 * vmw_cmd_dx_set_so_targets - Validate an
 * SVGA_3D_CMD_DX_SET_SOTARGETS command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_so binding;
	struct vmw_resource_val_node *res_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSOTargets body;
		SVGA3dSoTarget targets[];
	} *cmd;
	int i, ret, num;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dSoTarget);

	if (num > SVGA3D_DX_MAX_SOTARGETS) {
		DRM_ERROR("Invalid DX SO binding.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cmd->targets[i].sid, &res_node);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->res;
		binding.bi.res = ((res_node) ? res_node->res : NULL);
		binding.bi.bt = vmw_ctx_binding_so;
		binding.offset = cmd->targets[i].offset;
		binding.size = cmd->targets[i].sizeInBytes;
		binding.slot = i;

		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
				0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
	} *cmd;
	enum vmw_so_type so_type;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	so_type = vmw_so_cmd_to_type(header->id);
	res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_cmd_dx_check_subresource - Validate an
 * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		union {
			SVGA3dCmdDXReadbackSubResource r_body;
			SVGA3dCmdDXInvalidateSubResource i_body;
			SVGA3dCmdDXUpdateSubResource u_body;
			SVGA3dSurfaceId sid;
		};
	} *cmd;

	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
		     offsetof(typeof(*cmd), sid));

	cmd = container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->sid, NULL);
}
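
/*
 * The union overlay above works because all three subresource commands
 * place their surface id first; the BUILD_BUG_ON()s turn any future layout
 * drift into a compile-time error rather than a silent mis-validation:
 *
 *	offsetof(r_body, sid) == offsetof(i_body, sid)
 *			      == offsetof(u_body, sid) == offsetof(cmd, sid)
 */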

static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * vmw_cmd_dx_view_remove - validate a view remove command and
 * schedule the view resource for removal.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Check that the view exists, and if it was not created using this
 * command batch, conditionally make this command a NOP.
 */
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct {
		SVGA3dCmdHeader header;
		union vmw_view_destroy body;
	} *cmd = container_of(header, typeof(*cmd), header);
	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
	struct vmw_resource *view;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	ret = vmw_view_remove(sw_context->man,
			      cmd->body.view_id, view_type,
			      &sw_context->staged_cmd_res,
			      &view);
	if (ret || !view)
		return ret;

	/*
	 * If the view wasn't created during this command batch, it might
	 * have been removed due to a context swapout, so add a
	 * relocation to conditionally make this command a NOP to avoid
	 * device errors.
	 */
	return vmw_resource_relocation_add(&sw_context->res_relocations,
					   view,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_cond_nop);
}
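
/*
 * Relocation sketch: vmw_res_rel_cond_nop records the offset of the
 * command's header id relative to sw_context->buf_start. At fixup time the
 * command is kept if the view still exists and is otherwise patched into a
 * NOP, so the device never sees a destroy for a view that is already gone.
 */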

/**
 * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
	ret = vmw_cotable_notify(res, cmd->body.shaderId);
	vmw_resource_unreference(&res);
	if (ret)
		return ret;

	return vmw_dx_shader_add(sw_context->man, ctx_node->res,
				 cmd->body.shaderId, cmd->body.type,
				 &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDestroyShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
				&sw_context->staged_cmd_res);
	if (ret)
		DRM_ERROR("Could not find shader to remove.\n");

	return ret;
}

/**
 * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (cmd->body.cid != SVGA3D_INVALID_ID) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
					user_context_converter,
					&cmd->body.cid, &ctx_node);
		if (ret)
			return ret;
	} else {
		ctx_node = sw_context->dx_ctx_node;
		if (!ctx_node) {
			DRM_ERROR("DX Context not set.\n");
			return -EINVAL;
		}
	}

	res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
				cmd->body.shid, 0);
	if (IS_ERR(res)) {
		DRM_ERROR("Could not find shader to bind.\n");
		return PTR_ERR(res);
	}

	ret = vmw_resource_val_add(sw_context, res, &res_node);
	if (ret) {
		DRM_ERROR("Error creating resource validation node.\n");
		goto out_unref;
	}

	ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
					&cmd->body.mobid,
					cmd->body.offsetInBytes);
out_unref:
	vmw_resource_unreference(&res);

	return ret;
}
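
/*
 * Illustration (comment only, not compiled): a bind-shader command in the
 * batch carries everything the checks above validate, using the field
 * names of SVGA3dCmdDXBindShader:
 *
 *	body.cid           - explicit context id, or SVGA3D_INVALID_ID to
 *	                     use the batch's DX context;
 *	body.shid          - shader id looked up via vmw_shader_lookup();
 *	body.mobid         - mob holding the shader bytecode;
 *	body.offsetInBytes - start of the bytecode within that mob.
 */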

/**
 * vmw_cmd_dx_genmips - Validate an SVGA_3D_CMD_DX_GENMIPS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXGenMips body;
	} *cmd = container_of(header, typeof(*cmd), header);

	return vmw_view_id_val_add(sw_context, vmw_view_sr,
				   cmd->body.shaderResourceViewId);
}

/**
 * vmw_cmd_dx_transfer_from_buffer -
 * Validate an SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXTransferFromBuffer body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.destSid, NULL);
}

static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = ((uint32_t *)buf)[0];
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	default:
		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		DRM_ERROR("Invalid SVGA command (size mismatch):"
			  " %u.\n", cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}
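
/*
 * Illustration (comment only, not compiled): the legacy (non-3D) SVGA
 * FIFO commands handled above have no SVGA3dCmdHeader; they are a bare
 * 32-bit command id followed by a fixed-size body, hence the
 * sizeof(uint32_t) + sizeof(body) size computation:
 *
 *	struct {
 *		uint32_t cmd_id;	// e.g. SVGA_CMD_UPDATE
 *		SVGAFifoCmdUpdate body;	// x, y, width, height
 *	} update_cmd;
 */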

static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check, true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
		    &vmw_cmd_update_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
		    &vmw_cmd_readback_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
		    &vmw_cmd_readback_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
		    &vmw_cmd_invalidate_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
		    &vmw_cmd_invalidate_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
		    false, false, true),

	/*
	 * DX commands
	 */
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
		    &vmw_cmd_dx_set_shader_res, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
		    &vmw_cmd_dx_set_index_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
		    &vmw_cmd_dx_set_rendertargets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
		    &vmw_cmd_dx_define_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
		    &vmw_cmd_dx_destroy_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
		    &vmw_cmd_dx_bind_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
		    &vmw_cmd_dx_set_so_targets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
		    &vmw_cmd_buffer_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
		    &vmw_cmd_pred_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
		    &vmw_cmd_dx_transfer_from_buffer,
		    true, false, true),
};
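
/*
 * Illustration (comment only, not compiled), assuming VMW_CMD_DEF()
 * indexes the table by (cmd - SVGA_3D_CMD_BASE) as defined earlier in
 * this file: an entry such as
 *
 *	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
 *		    true, false, true)
 *
 * fills in a struct vmw_cmd_entry whose user_allow / gb_disable /
 * gb_enable flags gate the command to user-space callers on guest-backed
 * hardware only, matching the checks in vmw_cmd_check() below.
 */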

static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = ((uint32_t *)buf)[0];
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = header->id;
	*size = header->size + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->func))
		goto out_invalid;

	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_invalid;

	return 0;

out_invalid:
	DRM_ERROR("Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	DRM_ERROR("Privileged SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}
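
/*
 * Illustration (comment only, not compiled): a well-formed batch walked
 * by the loop above is a sequence of self-sizing commands, e.g.:
 *
 *	| SVGA3dCmdHeader { id, size } | size bytes of body |
 *	| SVGA3dCmdHeader { id, size } | size bytes of body |
 *
 * vmw_cmd_check() rewrites *size to header.size + sizeof(header) on each
 * iteration, so cur_size reaches exactly zero only if the declared sizes
 * tile the buffer; any overhang trips the "out of sync" error above.
 */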

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	sw_context->cur_reloc = 0;
}

static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	uint32_t i;
	struct vmw_relocation *reloc;
	struct ttm_validate_buffer *validate;
	struct ttm_buffer_object *bo;

	for (i = 0; i < sw_context->cur_reloc; ++i) {
		reloc = &sw_context->relocs[i];
		validate = &sw_context->val_bufs[reloc->index].base;
		bo = validate->bo;
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->mem.start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->mem.start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}
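
/*
 * Illustration (comment only, not compiled): for the VRAM and GMR cases
 * above, reloc->location typically points at an SVGAGuestPtr embedded in
 * the command stream:
 *
 *	struct SVGAGuestPtr {
 *		uint32 gmrId;	// SVGA_GMR_FRAMEBUFFER for VRAM
 *		uint32 offset;	// byte offset within the region
 *	};
 *
 * while the MOB case patches a bare 32-bit mob id through reloc->mob_loc
 * instead.
 */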

/**
 * vmw_resource_list_unreference - Free up a resource list and unreference
 * all resources referenced by it.
 *
 * @sw_context: The software context holding the staged binding state.
 * @list: The resource list.
 */
static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
					  struct list_head *list)
{
	struct vmw_resource_val_node *val, *val_next;

	/*
	 * Drop references to resources held during command submission.
	 */
	list_for_each_entry_safe(val, val_next, list, head) {
		list_del_init(&val->head);
		vmw_resource_unreference(&val->res);

		if (val->staged_bindings) {
			if (val->staged_bindings != sw_context->staged_bindings)
				vmw_binding_state_free(val->staged_bindings);
			else
				sw_context->staged_bindings_inuse = false;
			val->staged_bindings = NULL;
		}

		kfree(val);
	}
}

static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry, *next;
	struct vmw_resource_val_node *val;

	/*
	 * Drop references to DMA buffers held during command submission.
	 */
	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 base.head) {
		list_del(&entry->base.head);
		ttm_bo_unref(&entry->base.bo);
		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
		sw_context->cur_val_buf--;
	}
	BUG_ON(sw_context->cur_val_buf != 0);

	list_for_each_entry(val, &sw_context->resource_list, head)
		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}

int vmw_validate_single_buffer(struct vmw_private *dev_priv,
			       struct ttm_buffer_object *bo,
			       bool interruptible,
			       bool validate_as_mob)
{
	struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
						  base);
	int ret;

	if (vbo->pin_count > 0)
		return 0;

	if (validate_as_mob)
		return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
				       false);

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
			      false);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
	return ret;
}

static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
						 true,
						 entry->validate_as_mob);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	vfree(sw_context->cmd_bounce);
	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		DRM_ERROR("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}
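
/*
 * Worked example (comment only), assuming VMWGFX_CMD_BOUNCE_INIT_SIZE is
 * 32 KiB and 4 KiB pages: a 100 KiB batch grows the bounce buffer
 * 32K -> 48K -> 72K -> 108K, i.e. roughly 1.5x per step after page
 * alignment, so the number of reallocations stays logarithmic in the
 * largest batch size ever submitted.
 */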

/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * @file_priv: The calling file. Must be non-NULL when @p_handle is
 * non-NULL.
 * @dev_priv: Pointer to a device private struct.
 * @p_fence: Output pointer to the created fence object.
 * @p_handle: If non-NULL, a user-space handle for the fence is created
 * and returned here; otherwise no handle is created.
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and set the returned
 * fence to NULL. It is then safe to fence buffers with a NULL pointer.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence, p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false,
					 sequence, false,
					 VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return 0;
}
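
/*
 * Usage sketch (comment only, not compiled): callers that only need a
 * kernel-side fence pass NULL for both @file_priv and @p_handle, as
 * __vmw_execbuf_release_pinned_bo() below does:
 *
 *	struct vmw_fence_obj *fence = NULL;
 *
 *	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
 *	// A NULL fence is legal here; the fifo was synced instead.
 */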

/**
 * vmw_execbuf_copy_fence_user - copy fence object information to
 * user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
 * which the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 *
 * This function copies fence information to user-space. If copying fails,
 * the user-space struct drm_vmw_fence_rep::error member is hopefully
 * left untouched, and if it's preloaded with an -EFAULT by user-space,
 * the error will hopefully be detected.
 * Also if copying fails, user-space will be unable to signal the fence
 * object so we wait for it immediately, and then unreference the
 * user-space reference.
 */
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp,
			    int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence,
			    uint32_t fence_handle)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->base.seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in. Typically
	 * user-space would have pre-set that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync
	 * and unreference the handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		ttm_ref_object_base_unref(vmw_fp->tfile,
					  fence_handle, TTM_REF_USAGE);
		DRM_ERROR("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}
}
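
/*
 * Illustration (comment only, not compiled), user-space side of the
 * contract described above: the caller preloads the error member so a
 * failed copy_to_user() in the kernel is still observable:
 *
 *	struct drm_vmw_fence_rep rep = { .error = -EFAULT };
 *
 *	arg.fence_rep = (uintptr_t) &rep;
 *	ioctl(fd, DRM_IOCTL_VMW_EXECBUF, &arg);
 *	if (rep.error != 0)
 *		goto no_fence;	// handle was never delivered
 */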

/**
 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
 * the fifo.
 *
 * @dev_priv: Pointer to a device private structure.
 * @kernel_commands: Pointer to the unpatched command batch.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command batch
 * pointed to by @kernel_commands will have been modified.
 */
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
				   void *kernel_commands,
				   u32 command_size,
				   struct vmw_sw_context *sw_context)
{
	void *cmd;

	if (sw_context->dx_ctx_node)
		cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
					  sw_context->dx_ctx_node->res->id);
	else
		cmd = vmw_fifo_reserve(dev_priv, command_size);
	if (!cmd) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		return -ENOMEM;
	}

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_fifo_commit(dev_priv, command_size);

	return 0;
}

/**
 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
 * the command buffer manager.
 *
 * @dev_priv: Pointer to a device private structure.
 * @header: Opaque handle to the command buffer allocation.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command buffer
 * represented by @header will have been modified.
 */
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
				     struct vmw_cmdbuf_header *header,
				     u32 command_size,
				     struct vmw_sw_context *sw_context)
{
	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
		  SVGA3D_INVALID_ID);
	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
				       id, false, header);

	vmw_apply_relocations(sw_context);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);

	return 0;
}

/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Pointer to an already copied-in kernel command batch,
 * or NULL if the batch should be copied from @user_commands.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and
 * copies the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 * If command buffers could not be used, the function will return the value
 * of @kernel_commands on function call. That value may be NULL. In that case,
 * the value of *@header will be set to NULL.
 * If an error is encountered, the function will return a pointer error value.
 * If the function is interrupted by a signal while sleeping, it will return
 * -ERESTARTSYS cast to a pointer error value.
 */
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
				void __user *user_commands,
				void *kernel_commands,
				u32 command_size,
				struct vmw_cmdbuf_header **header)
{
	size_t cmdbuf_size;
	int ret;

	*header = NULL;
	if (command_size > SVGA_CB_MAX_SIZE) {
		DRM_ERROR("Command buffer is too large.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!dev_priv->cman || kernel_commands)
		return kernel_commands;

	/* If possible, add a little space for fencing. */
	cmdbuf_size = command_size + 512;
	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
					   true, header);
	if (IS_ERR(kernel_commands))
		return kernel_commands;

	ret = copy_from_user(kernel_commands, user_commands,
			     command_size);
	if (ret) {
		DRM_ERROR("Failed copying commands.\n");
		vmw_cmdbuf_header_free(*header);
		*header = NULL;
		return ERR_PTR(-EFAULT);
	}

	return kernel_commands;
}

static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   uint32_t handle)
{
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource *res;
	int ret;

	if (handle == SVGA3D_INVALID_ID)
		return 0;

	ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
					      handle, user_context_converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use DX context 0x%08x.\n",
			  (unsigned) handle);
		return ret;
	}

	ret = vmw_resource_val_add(sw_context, res, &ctx_node);
	if (unlikely(ret != 0))
		goto out_err;

	sw_context->dx_ctx_node = ctx_node;
	sw_context->man = vmw_context_res_man(res);
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands,
			void *kernel_commands,
			uint32_t command_size,
			uint64_t throttle_us,
			uint32_t dx_context_handle,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_resource *error_resource;
	struct list_head resource_list;
	struct vmw_cmdbuf_header *header;
	struct ww_acquire_ctx ticket;
	uint32_t handle;
	int ret;

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);
		if (ret)
			return ret;
	}

	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
					     kernel_commands, command_size,
					     &header);
	if (IS_ERR(kernel_commands))
		return PTR_ERR(kernel_commands);

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (ret) {
		ret = -ERESTARTSYS;
		goto out_free_header;
	}

	sw_context->kernel = false;
	if (kernel_commands == NULL) {
		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce,
				     user_commands, command_size);
		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			DRM_ERROR("Failed copying commands.\n");
			goto out_unlock;
		}
		kernel_commands = sw_context->cmd_bounce;
	} else if (!header)
		sw_context->kernel = true;

	sw_context->fp = vmw_fpriv(file_priv);
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;
	INIT_LIST_HEAD(&sw_context->resource_list);
	INIT_LIST_HEAD(&sw_context->ctx_resource_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	sw_context->dx_ctx_node = NULL;
	sw_context->dx_query_mob = NULL;
	sw_context->dx_query_ctx = NULL;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->validate_nodes);
	INIT_LIST_HEAD(&sw_context->res_relocations);
	if (sw_context->staged_bindings)
		vmw_binding_state_reset(sw_context->staged_bindings);

	if (!sw_context->res_ht_initialized) {
		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
		if (unlikely(ret != 0))
			goto out_unlock;
		sw_context->res_ht_initialized = true;
	}
	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
	INIT_LIST_HEAD(&resource_list);
	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
	if (unlikely(ret != 0)) {
		list_splice_init(&sw_context->ctx_resource_list,
				 &sw_context->resource_list);
		goto out_err_nores;
	}

	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	/*
	 * Merge the resource lists before checking the return status
	 * from vmw_cmd_check_all so that all the open hashtabs will
	 * be handled properly even if vmw_cmd_check_all fails.
	 */
	list_splice_init(&sw_context->ctx_resource_list,
			 &sw_context->resource_list);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
				     true, NULL);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_resources_validate(sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}

	if (!header) {
		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
					      command_size, sw_context);
	} else {
		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
						sw_context);
		header = NULL;
	}
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		goto out_err;

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
					 &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @user_fence_rep.
	 */
	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	vmw_resources_unreserve(sw_context, false);

	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
				    (void *) fence);

	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	vmw_clear_validations(sw_context);
	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
				    user_fence_rep, fence, handle);

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	list_splice_init(&sw_context->resource_list, &resource_list);
	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(sw_context, &resource_list);

	return 0;

out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
out_err_nores:
	vmw_resources_unreserve(sw_context, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	vmw_clear_validations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	list_splice_init(&sw_context->resource_list, &resource_list);
	error_resource = sw_context->error_resource;
	sw_context->error_resource = NULL;
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(sw_context, &resource_list);
	if (unlikely(error_resource != NULL))
		vmw_resource_unreference(&error_resource);
out_free_header:
	if (header)
		vmw_cmdbuf_header_free(header);

	return ret;
}

/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer
 * if the normal way to do this hits an error, which should typically be
 * extremely rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
}

/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued
 * _after_ a query barrier that flushes all queries touching the current
 * buffer pointed to by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
 * before calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct list_head validate_list;
	struct ttm_validate_buffer pinned_val, query_val;
	struct vmw_fence_obj *lfence = NULL;
	struct ww_acquire_ctx ticket;

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	INIT_LIST_HEAD(&validate_list);

	pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
	pinned_val.shared = false;
	list_add_tail(&pinned_val.head, &validate_list);

	query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
	query_val.shared = false;
	list_add_tail(&query_val.head, &validate_list);

	ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
				     false, NULL);
	if (unlikely(ret != 0)) {
		vmw_execbuf_unpin_panic(dev_priv);
		goto out_no_reserve;
	}

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (unlikely(ret != 0)) {
			vmw_execbuf_unpin_panic(dev_priv);
			goto out_no_emit;
		}
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
out_unlock:
	return;

out_no_emit:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_no_reserve:
	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
}

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
		      struct drm_file *file_priv, size_t size)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg arg;
	int ret;
	static const size_t copy_offset[] = {
		offsetof(struct drm_vmw_execbuf_arg, context_handle),
		sizeof(struct drm_vmw_execbuf_arg)};

	if (unlikely(size < copy_offset[0])) {
		DRM_ERROR("Invalid command size, ioctl %d\n",
			  DRM_VMW_EXECBUF);
		return -EINVAL;
	}

	if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
		return -EFAULT;

	/*
	 * Extend the ioctl argument while
	 * maintaining backwards compatibility:
	 * We take different code paths depending on the value of
	 * arg.version.
	 */
	if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
		     arg.version == 0)) {
		DRM_ERROR("Incorrect execbuf version.\n");
		return -EINVAL;
	}

	if (arg.version > 1 &&
	    copy_from_user(&arg.context_handle,
			   (void __user *) (data + copy_offset[0]),
			   copy_offset[arg.version - 1] -
			   copy_offset[0]) != 0)
		return -EFAULT;

	switch (arg.version) {
	case 1:
		arg.context_handle = (uint32_t) -1;
		break;
	case 2:
		if (arg.pad64 != 0) {
			DRM_ERROR("Unused IOCTL data not set to zero.\n");
			return -EINVAL;
		}
		break;
	default:
		break;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg.commands,
				  NULL, arg.command_size, arg.throttle_us,
				  arg.context_handle,
				  (void __user *)(unsigned long)arg.fence_rep,
				  NULL);
	ttm_read_unlock(&dev_priv->reservation_sem);
	if (unlikely(ret != 0))
		return ret;

	vmw_kms_cursor_post_execbuf(dev_priv);

	return 0;
}
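
/*
 * Illustration (comment only, not compiled), assuming the version-2
 * argument layout implied by copy_offset[] above: a user-space caller
 * submitting against an explicit DX context sets version 2 and zeroes
 * the padding that the ioctl rejects otherwise:
 *
 *	struct drm_vmw_execbuf_arg arg = {
 *		.commands = (uintptr_t) cmds,
 *		.command_size = cmd_size,
 *		.throttle_us = 0,
 *		.fence_rep = (uintptr_t) &rep,
 *		.version = 2,
 *		.context_handle = dx_ctx_handle,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_VMW_EXECBUF, &arg);
 */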