/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Ke Yu
 *    Kevin Tian <kevin.tian@intel.com>
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Yulei Zhang <yulei.zhang@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include <linux/slab.h>
#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"
#define INVALID_OP (~0U)

#define OP_LEN_MI 9
#define OP_LEN_2D 10
#define OP_LEN_3D_MEDIA 16
#define OP_LEN_MFX_VC 16
#define OP_LEN_VEBOX 16

#define CMD_TYPE(cmd) (((cmd) >> 29) & 7)
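
/*
 * Worked example (editorial annotation, not in the original source):
 * an MI_BATCH_BUFFER_START header carries type 0 in bits 31:29 and
 * opcode 0x31 in bits 28:23, so dword0 = 0x31 << 23 = 0x18800000,
 * and CMD_TYPE(0x18800000) = (0x18800000 >> 29) & 7 = 0, which
 * selects the "MI" decode table defined below.
 */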
struct sub_op_bits {
	int hi;
	int low;
};

struct decode_info {
	char *name;
	int op_len;
	int nr_sub_op;
	struct sub_op_bits *sub_op;
};

#define MAX_CMD_BUDGET 0x7fffffff

#define MI_WAIT_FOR_PLANE_C_FLIP_PENDING (1<<15)
#define MI_WAIT_FOR_PLANE_B_FLIP_PENDING (1<<9)
#define MI_WAIT_FOR_PLANE_A_FLIP_PENDING (1<<1)

#define MI_WAIT_FOR_SPRITE_C_FLIP_PENDING (1<<20)
#define MI_WAIT_FOR_SPRITE_B_FLIP_PENDING (1<<10)
#define MI_WAIT_FOR_SPRITE_A_FLIP_PENDING (1<<2)

/* Render Command Map */

/* MI_* command Opcode (28:23) */
#define OP_MI_NOOP 0x0
#define OP_MI_SET_PREDICATE 0x1 /* HSW+ */
#define OP_MI_USER_INTERRUPT 0x2
#define OP_MI_WAIT_FOR_EVENT 0x3
#define OP_MI_FLUSH 0x4
#define OP_MI_ARB_CHECK 0x5
#define OP_MI_RS_CONTROL 0x6 /* HSW+ */
#define OP_MI_REPORT_HEAD 0x7
#define OP_MI_ARB_ON_OFF 0x8
#define OP_MI_URB_ATOMIC_ALLOC 0x9 /* HSW+ */
#define OP_MI_BATCH_BUFFER_END 0xA
#define OP_MI_SUSPEND_FLUSH 0xB
#define OP_MI_PREDICATE 0xC /* IVB+ */
#define OP_MI_TOPOLOGY_FILTER 0xD /* IVB+ */
#define OP_MI_SET_APPID 0xE /* IVB+ */
#define OP_MI_RS_CONTEXT 0xF /* HSW+ */
#define OP_MI_LOAD_SCAN_LINES_INCL 0x12 /* HSW+ */
#define OP_MI_DISPLAY_FLIP 0x14
#define OP_MI_SEMAPHORE_MBOX 0x16
#define OP_MI_SET_CONTEXT 0x18
#define OP_MI_MATH 0x1A
#define OP_MI_URB_CLEAR 0x19
#define OP_MI_SEMAPHORE_SIGNAL 0x1B /* BDW+ */
#define OP_MI_SEMAPHORE_WAIT 0x1C /* BDW+ */
#define OP_MI_STORE_DATA_IMM 0x20
#define OP_MI_STORE_DATA_INDEX 0x21
#define OP_MI_LOAD_REGISTER_IMM 0x22
#define OP_MI_UPDATE_GTT 0x23
#define OP_MI_STORE_REGISTER_MEM 0x24
#define OP_MI_FLUSH_DW 0x26
#define OP_MI_CLFLUSH 0x27
#define OP_MI_REPORT_PERF_COUNT 0x28
#define OP_MI_LOAD_REGISTER_MEM 0x29 /* HSW+ */
#define OP_MI_LOAD_REGISTER_REG 0x2A /* HSW+ */
#define OP_MI_RS_STORE_DATA_IMM 0x2B /* HSW+ */
#define OP_MI_LOAD_URB_MEM 0x2C /* HSW+ */
#define OP_MI_STORE_URM_MEM 0x2D /* HSW+ */
#define OP_MI_2E 0x2E /* BDW+ */
#define OP_MI_2F 0x2F /* BDW+ */
#define OP_MI_BATCH_BUFFER_START 0x31

/* Bit definition for dword 0 */
#define _CMDBIT_BB_START_IN_PPGTT (1UL << 8)

#define OP_MI_CONDITIONAL_BATCH_BUFFER_END 0x36

#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
#define BATCH_BUFFER_ADR_SPACE_BIT(x) (((x) >> 8) & 1U)
#define BATCH_BUFFER_2ND_LEVEL_BIT(x) ((x) >> 22 & 1U)
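
/*
 * Mask arithmetic, spelled out (editorial annotation, not in the
 * original source): (1UL << 32) - (1U << 2) = 0x100000000 - 4 =
 * 0xfffffffc keeps the dword-aligned bits 31:2 of the low address
 * dword, and (1UL << 16) - 1 = 0xffff keeps bits 15:0 of the high
 * dword, so together the two masks recover a 48-bit batch address.
 */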
/* 2D command: Opcode (28:22) */
#define OP_2D(x) ((2<<7) | x)
#define OP_XY_SETUP_BLT OP_2D(0x1)
#define OP_XY_SETUP_CLIP_BLT OP_2D(0x3)
#define OP_XY_SETUP_MONO_PATTERN_SL_BLT OP_2D(0x11)
#define OP_XY_PIXEL_BLT OP_2D(0x24)
#define OP_XY_SCANLINES_BLT OP_2D(0x25)
#define OP_XY_TEXT_BLT OP_2D(0x26)
#define OP_XY_TEXT_IMMEDIATE_BLT OP_2D(0x31)
#define OP_XY_COLOR_BLT OP_2D(0x50)
#define OP_XY_PAT_BLT OP_2D(0x51)
#define OP_XY_MONO_PAT_BLT OP_2D(0x52)
#define OP_XY_SRC_COPY_BLT OP_2D(0x53)
#define OP_XY_MONO_SRC_COPY_BLT OP_2D(0x54)
#define OP_XY_FULL_BLT OP_2D(0x55)
#define OP_XY_FULL_MONO_SRC_BLT OP_2D(0x56)
#define OP_XY_FULL_MONO_PATTERN_BLT OP_2D(0x57)
#define OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT OP_2D(0x58)
#define OP_XY_MONO_PAT_FIXED_BLT OP_2D(0x59)
#define OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT OP_2D(0x71)
#define OP_XY_PAT_BLT_IMMEDIATE OP_2D(0x72)
#define OP_XY_SRC_COPY_CHROMA_BLT OP_2D(0x73)
#define OP_XY_FULL_IMMEDIATE_PATTERN_BLT OP_2D(0x74)
#define OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT OP_2D(0x75)
#define OP_XY_PAT_CHROMA_BLT OP_2D(0x76)
#define OP_XY_PAT_CHROMA_BLT_IMMEDIATE OP_2D(0x77)

/* 3D/Media Command: Pipeline Type(28:27) Opcode(26:24) Sub Opcode(23:16) */
#define OP_3D_MEDIA(sub_type, opcode, sub_opcode) \
	((3 << 13) | ((sub_type) << 11) | ((opcode) << 8) | (sub_opcode))
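
/*
 * Worked example (editorial annotation, not in the original source):
 * OP_PIPE_CONTROL below is OP_3D_MEDIA(0x3, 0x2, 0x00) =
 * (3 << 13) | (3 << 11) | (2 << 8) = 0x6000 | 0x1800 | 0x200 = 0x7a00,
 * which matches what get_opcode() computes for a PIPE_CONTROL header:
 * with OP_LEN_3D_MEDIA = 16, the opcode is cmd >> (32 - 16), i.e. the
 * top 16 bits of dword0.
 */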
#define OP_STATE_PREFETCH OP_3D_MEDIA(0x0, 0x0, 0x03)

#define OP_STATE_BASE_ADDRESS OP_3D_MEDIA(0x0, 0x1, 0x01)
#define OP_STATE_SIP OP_3D_MEDIA(0x0, 0x1, 0x02)
#define OP_3D_MEDIA_0_1_4 OP_3D_MEDIA(0x0, 0x1, 0x04)

#define OP_3DSTATE_VF_STATISTICS_GM45 OP_3D_MEDIA(0x1, 0x0, 0x0B)

#define OP_PIPELINE_SELECT OP_3D_MEDIA(0x1, 0x1, 0x04)

#define OP_MEDIA_VFE_STATE OP_3D_MEDIA(0x2, 0x0, 0x0)
#define OP_MEDIA_CURBE_LOAD OP_3D_MEDIA(0x2, 0x0, 0x1)
#define OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD OP_3D_MEDIA(0x2, 0x0, 0x2)
#define OP_MEDIA_GATEWAY_STATE OP_3D_MEDIA(0x2, 0x0, 0x3)
#define OP_MEDIA_STATE_FLUSH OP_3D_MEDIA(0x2, 0x0, 0x4)

#define OP_MEDIA_OBJECT OP_3D_MEDIA(0x2, 0x1, 0x0)
#define OP_MEDIA_OBJECT_PRT OP_3D_MEDIA(0x2, 0x1, 0x2)
#define OP_MEDIA_OBJECT_WALKER OP_3D_MEDIA(0x2, 0x1, 0x3)
#define OP_GPGPU_WALKER OP_3D_MEDIA(0x2, 0x1, 0x5)

#define OP_3DSTATE_CLEAR_PARAMS OP_3D_MEDIA(0x3, 0x0, 0x04) /* IVB+ */
#define OP_3DSTATE_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x05) /* IVB+ */
#define OP_3DSTATE_STENCIL_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x06) /* IVB+ */
#define OP_3DSTATE_HIER_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x07) /* IVB+ */
#define OP_3DSTATE_VERTEX_BUFFERS OP_3D_MEDIA(0x3, 0x0, 0x08)
#define OP_3DSTATE_VERTEX_ELEMENTS OP_3D_MEDIA(0x3, 0x0, 0x09)
#define OP_3DSTATE_INDEX_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x0A)
#define OP_3DSTATE_VF_STATISTICS OP_3D_MEDIA(0x3, 0x0, 0x0B)
#define OP_3DSTATE_VF OP_3D_MEDIA(0x3, 0x0, 0x0C) /* HSW+ */
#define OP_3DSTATE_CC_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x0E)
#define OP_3DSTATE_SCISSOR_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x0F)
#define OP_3DSTATE_VS OP_3D_MEDIA(0x3, 0x0, 0x10)
#define OP_3DSTATE_GS OP_3D_MEDIA(0x3, 0x0, 0x11)
#define OP_3DSTATE_CLIP OP_3D_MEDIA(0x3, 0x0, 0x12)
#define OP_3DSTATE_SF OP_3D_MEDIA(0x3, 0x0, 0x13)
#define OP_3DSTATE_WM OP_3D_MEDIA(0x3, 0x0, 0x14)
#define OP_3DSTATE_CONSTANT_VS OP_3D_MEDIA(0x3, 0x0, 0x15)
#define OP_3DSTATE_CONSTANT_GS OP_3D_MEDIA(0x3, 0x0, 0x16)
#define OP_3DSTATE_CONSTANT_PS OP_3D_MEDIA(0x3, 0x0, 0x17)
#define OP_3DSTATE_SAMPLE_MASK OP_3D_MEDIA(0x3, 0x0, 0x18)
#define OP_3DSTATE_CONSTANT_HS OP_3D_MEDIA(0x3, 0x0, 0x19) /* IVB+ */
#define OP_3DSTATE_CONSTANT_DS OP_3D_MEDIA(0x3, 0x0, 0x1A) /* IVB+ */
#define OP_3DSTATE_HS OP_3D_MEDIA(0x3, 0x0, 0x1B) /* IVB+ */
#define OP_3DSTATE_TE OP_3D_MEDIA(0x3, 0x0, 0x1C) /* IVB+ */
#define OP_3DSTATE_DS OP_3D_MEDIA(0x3, 0x0, 0x1D) /* IVB+ */
#define OP_3DSTATE_STREAMOUT OP_3D_MEDIA(0x3, 0x0, 0x1E) /* IVB+ */
#define OP_3DSTATE_SBE OP_3D_MEDIA(0x3, 0x0, 0x1F) /* IVB+ */
#define OP_3DSTATE_PS OP_3D_MEDIA(0x3, 0x0, 0x20) /* IVB+ */
#define OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP OP_3D_MEDIA(0x3, 0x0, 0x21) /* IVB+ */
#define OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC OP_3D_MEDIA(0x3, 0x0, 0x23) /* IVB+ */
#define OP_3DSTATE_BLEND_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x24) /* IVB+ */
#define OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x25) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_VS OP_3D_MEDIA(0x3, 0x0, 0x26) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_HS OP_3D_MEDIA(0x3, 0x0, 0x27) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_DS OP_3D_MEDIA(0x3, 0x0, 0x28) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_GS OP_3D_MEDIA(0x3, 0x0, 0x29) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_PS OP_3D_MEDIA(0x3, 0x0, 0x2A) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_VS OP_3D_MEDIA(0x3, 0x0, 0x2B) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_HS OP_3D_MEDIA(0x3, 0x0, 0x2C) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_DS OP_3D_MEDIA(0x3, 0x0, 0x2D) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_GS OP_3D_MEDIA(0x3, 0x0, 0x2E) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_PS OP_3D_MEDIA(0x3, 0x0, 0x2F) /* IVB+ */
#define OP_3DSTATE_URB_VS OP_3D_MEDIA(0x3, 0x0, 0x30) /* IVB+ */
#define OP_3DSTATE_URB_HS OP_3D_MEDIA(0x3, 0x0, 0x31) /* IVB+ */
#define OP_3DSTATE_URB_DS OP_3D_MEDIA(0x3, 0x0, 0x32) /* IVB+ */
#define OP_3DSTATE_URB_GS OP_3D_MEDIA(0x3, 0x0, 0x33) /* IVB+ */
#define OP_3DSTATE_GATHER_CONSTANT_VS OP_3D_MEDIA(0x3, 0x0, 0x34) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_GS OP_3D_MEDIA(0x3, 0x0, 0x35) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_HS OP_3D_MEDIA(0x3, 0x0, 0x36) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_DS OP_3D_MEDIA(0x3, 0x0, 0x37) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_PS OP_3D_MEDIA(0x3, 0x0, 0x38) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTF_VS OP_3D_MEDIA(0x3, 0x0, 0x39) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTF_PS OP_3D_MEDIA(0x3, 0x0, 0x3A) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTI_VS OP_3D_MEDIA(0x3, 0x0, 0x3B) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTI_PS OP_3D_MEDIA(0x3, 0x0, 0x3C) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTB_VS OP_3D_MEDIA(0x3, 0x0, 0x3D) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTB_PS OP_3D_MEDIA(0x3, 0x0, 0x3E) /* HSW+ */
#define OP_3DSTATE_DX9_LOCAL_VALID_VS OP_3D_MEDIA(0x3, 0x0, 0x3F) /* HSW+ */
#define OP_3DSTATE_DX9_LOCAL_VALID_PS OP_3D_MEDIA(0x3, 0x0, 0x40) /* HSW+ */
#define OP_3DSTATE_DX9_GENERATE_ACTIVE_VS OP_3D_MEDIA(0x3, 0x0, 0x41) /* HSW+ */
#define OP_3DSTATE_DX9_GENERATE_ACTIVE_PS OP_3D_MEDIA(0x3, 0x0, 0x42) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_VS OP_3D_MEDIA(0x3, 0x0, 0x43) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_GS OP_3D_MEDIA(0x3, 0x0, 0x44) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_HS OP_3D_MEDIA(0x3, 0x0, 0x45) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_DS OP_3D_MEDIA(0x3, 0x0, 0x46) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_PS OP_3D_MEDIA(0x3, 0x0, 0x47) /* HSW+ */
#define OP_3DSTATE_VF_INSTANCING OP_3D_MEDIA(0x3, 0x0, 0x49) /* BDW+ */
#define OP_3DSTATE_VF_SGVS OP_3D_MEDIA(0x3, 0x0, 0x4A) /* BDW+ */
#define OP_3DSTATE_VF_TOPOLOGY OP_3D_MEDIA(0x3, 0x0, 0x4B) /* BDW+ */
#define OP_3DSTATE_WM_CHROMAKEY OP_3D_MEDIA(0x3, 0x0, 0x4C) /* BDW+ */
#define OP_3DSTATE_PS_BLEND OP_3D_MEDIA(0x3, 0x0, 0x4D) /* BDW+ */
#define OP_3DSTATE_WM_DEPTH_STENCIL OP_3D_MEDIA(0x3, 0x0, 0x4E) /* BDW+ */
#define OP_3DSTATE_PS_EXTRA OP_3D_MEDIA(0x3, 0x0, 0x4F) /* BDW+ */
#define OP_3DSTATE_RASTER OP_3D_MEDIA(0x3, 0x0, 0x50) /* BDW+ */
#define OP_3DSTATE_SBE_SWIZ OP_3D_MEDIA(0x3, 0x0, 0x51) /* BDW+ */
#define OP_3DSTATE_WM_HZ_OP OP_3D_MEDIA(0x3, 0x0, 0x52) /* BDW+ */
#define OP_3DSTATE_COMPONENT_PACKING OP_3D_MEDIA(0x3, 0x0, 0x55) /* SKL+ */

#define OP_3DSTATE_DRAWING_RECTANGLE OP_3D_MEDIA(0x3, 0x1, 0x00)
#define OP_3DSTATE_SAMPLER_PALETTE_LOAD0 OP_3D_MEDIA(0x3, 0x1, 0x02)
#define OP_3DSTATE_CHROMA_KEY OP_3D_MEDIA(0x3, 0x1, 0x04)
#define OP_SNB_3DSTATE_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x05)
#define OP_3DSTATE_POLY_STIPPLE_OFFSET OP_3D_MEDIA(0x3, 0x1, 0x06)
#define OP_3DSTATE_POLY_STIPPLE_PATTERN OP_3D_MEDIA(0x3, 0x1, 0x07)
#define OP_3DSTATE_LINE_STIPPLE OP_3D_MEDIA(0x3, 0x1, 0x08)
#define OP_3DSTATE_AA_LINE_PARAMS OP_3D_MEDIA(0x3, 0x1, 0x0A)
#define OP_3DSTATE_GS_SVB_INDEX OP_3D_MEDIA(0x3, 0x1, 0x0B)
#define OP_3DSTATE_SAMPLER_PALETTE_LOAD1 OP_3D_MEDIA(0x3, 0x1, 0x0C)
#define OP_3DSTATE_MULTISAMPLE_BDW OP_3D_MEDIA(0x3, 0x0, 0x0D)
#define OP_SNB_3DSTATE_STENCIL_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x0E)
#define OP_SNB_3DSTATE_HIER_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x0F)
#define OP_SNB_3DSTATE_CLEAR_PARAMS OP_3D_MEDIA(0x3, 0x1, 0x10)
#define OP_3DSTATE_MONOFILTER_SIZE OP_3D_MEDIA(0x3, 0x1, 0x11)
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS OP_3D_MEDIA(0x3, 0x1, 0x12) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS OP_3D_MEDIA(0x3, 0x1, 0x13) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS OP_3D_MEDIA(0x3, 0x1, 0x14) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS OP_3D_MEDIA(0x3, 0x1, 0x15) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS OP_3D_MEDIA(0x3, 0x1, 0x16) /* IVB+ */
#define OP_3DSTATE_SO_DECL_LIST OP_3D_MEDIA(0x3, 0x1, 0x17)
#define OP_3DSTATE_SO_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x18)
#define OP_3DSTATE_BINDING_TABLE_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x19) /* HSW+ */
#define OP_3DSTATE_GATHER_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x1A) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x1B) /* HSW+ */
#define OP_3DSTATE_SAMPLE_PATTERN OP_3D_MEDIA(0x3, 0x1, 0x1C)

#define OP_PIPE_CONTROL OP_3D_MEDIA(0x3, 0x2, 0x00)

#define OP_3DPRIMITIVE OP_3D_MEDIA(0x3, 0x3, 0x00)

/* VCCP Command Parser */

/*
 * The MFX and VEB command definitions below are taken from the vaapi
 * intel-driver project (BSD License):
 * git://anongit.freedesktop.org/vaapi/intel-driver
 * src/i965_defines.h
 *
 */
#define OP_MFX(pipeline, op, sub_opa, sub_opb) \
	(3 << 13 | \
	 (pipeline) << 11 | \
	 (op) << 8 | \
	 (sub_opa) << 5 | \
	 (sub_opb))
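
/*
 * Worked example (editorial annotation, not in the original source):
 * OP_MFX_PIPE_MODE_SELECT below is OP_MFX(2, 0, 0, 0) =
 * (3 << 13) | (2 << 11) = 0x6000 | 0x1000 = 0x7000; the five
 * sub_op_mfx_vc bit-fields defined later decode the same command
 * dword back into type 3, pipeline 2, op 0, sub_opa 0, sub_opb 0.
 */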
#define OP_MFX_PIPE_MODE_SELECT OP_MFX(2, 0, 0, 0) /* ALL */
#define OP_MFX_SURFACE_STATE OP_MFX(2, 0, 0, 1) /* ALL */
#define OP_MFX_PIPE_BUF_ADDR_STATE OP_MFX(2, 0, 0, 2) /* ALL */
#define OP_MFX_IND_OBJ_BASE_ADDR_STATE OP_MFX(2, 0, 0, 3) /* ALL */
#define OP_MFX_BSP_BUF_BASE_ADDR_STATE OP_MFX(2, 0, 0, 4) /* ALL */
#define OP_2_0_0_5 OP_MFX(2, 0, 0, 5) /* ALL */
#define OP_MFX_STATE_POINTER OP_MFX(2, 0, 0, 6) /* ALL */
#define OP_MFX_QM_STATE OP_MFX(2, 0, 0, 7) /* IVB+ */
#define OP_MFX_FQM_STATE OP_MFX(2, 0, 0, 8) /* IVB+ */
#define OP_MFX_PAK_INSERT_OBJECT OP_MFX(2, 0, 2, 8) /* IVB+ */
#define OP_MFX_STITCH_OBJECT OP_MFX(2, 0, 2, 0xA) /* IVB+ */

#define OP_MFD_IT_OBJECT OP_MFX(2, 0, 1, 9) /* ALL */

#define OP_MFX_WAIT OP_MFX(1, 0, 0, 0) /* IVB+ */
#define OP_MFX_AVC_IMG_STATE OP_MFX(2, 1, 0, 0) /* ALL */
#define OP_MFX_AVC_QM_STATE OP_MFX(2, 1, 0, 1) /* ALL */
#define OP_MFX_AVC_DIRECTMODE_STATE OP_MFX(2, 1, 0, 2) /* ALL */
#define OP_MFX_AVC_SLICE_STATE OP_MFX(2, 1, 0, 3) /* ALL */
#define OP_MFX_AVC_REF_IDX_STATE OP_MFX(2, 1, 0, 4) /* ALL */
#define OP_MFX_AVC_WEIGHTOFFSET_STATE OP_MFX(2, 1, 0, 5) /* ALL */
#define OP_MFD_AVC_PICID_STATE OP_MFX(2, 1, 1, 5) /* HSW+ */
#define OP_MFD_AVC_DPB_STATE OP_MFX(2, 1, 1, 6) /* IVB+ */
#define OP_MFD_AVC_SLICEADDR OP_MFX(2, 1, 1, 7) /* IVB+ */
#define OP_MFD_AVC_BSD_OBJECT OP_MFX(2, 1, 1, 8) /* ALL */
#define OP_MFC_AVC_PAK_OBJECT OP_MFX(2, 1, 2, 9) /* ALL */
#define OP_MFX_VC1_PRED_PIPE_STATE OP_MFX(2, 2, 0, 1) /* ALL */
#define OP_MFX_VC1_DIRECTMODE_STATE OP_MFX(2, 2, 0, 2) /* ALL */
#define OP_MFD_VC1_SHORT_PIC_STATE OP_MFX(2, 2, 1, 0) /* IVB+ */
#define OP_MFD_VC1_LONG_PIC_STATE OP_MFX(2, 2, 1, 1) /* IVB+ */
#define OP_MFD_VC1_BSD_OBJECT OP_MFX(2, 2, 1, 8) /* ALL */
#define OP_MFX_MPEG2_PIC_STATE OP_MFX(2, 3, 0, 0) /* ALL */
#define OP_MFX_MPEG2_QM_STATE OP_MFX(2, 3, 0, 1) /* ALL */
#define OP_MFD_MPEG2_BSD_OBJECT OP_MFX(2, 3, 1, 8) /* ALL */
#define OP_MFC_MPEG2_SLICEGROUP_STATE OP_MFX(2, 3, 2, 3) /* ALL */
#define OP_MFC_MPEG2_PAK_OBJECT OP_MFX(2, 3, 2, 9) /* ALL */
#define OP_MFX_2_6_0_0 OP_MFX(2, 6, 0, 0) /* IVB+ */
#define OP_MFX_2_6_0_8 OP_MFX(2, 6, 0, 8) /* IVB+ */
#define OP_MFX_2_6_0_9 OP_MFX(2, 6, 0, 9) /* IVB+ */

#define OP_MFX_JPEG_PIC_STATE OP_MFX(2, 7, 0, 0)
#define OP_MFX_JPEG_HUFF_TABLE_STATE OP_MFX(2, 7, 0, 2)
#define OP_MFD_JPEG_BSD_OBJECT OP_MFX(2, 7, 1, 8)

#define OP_VEB(pipeline, op, sub_opa, sub_opb) \
	(3 << 13 | \
	 (pipeline) << 11 | \
	 (op) << 8 | \
	 (sub_opa) << 5 | \
	 (sub_opb))

#define OP_VEB_SURFACE_STATE OP_VEB(2, 4, 0, 0)
#define OP_VEB_STATE OP_VEB(2, 4, 0, 2)
#define OP_VEB_DNDI_IECP_STATE OP_VEB(2, 4, 0, 3)

struct parser_exec_state;

typedef int (*parser_cmd_handler)(struct parser_exec_state *s);

#define GVT_CMD_HASH_BITS 7

/* which DWords need address fix */
#define ADDR_FIX_1(x1) (1 << (x1))
#define ADDR_FIX_2(x1, x2) (ADDR_FIX_1(x1) | ADDR_FIX_1(x2))
#define ADDR_FIX_3(x1, x2, x3) (ADDR_FIX_1(x1) | ADDR_FIX_2(x2, x3))
#define ADDR_FIX_4(x1, x2, x3, x4) (ADDR_FIX_1(x1) | ADDR_FIX_3(x2, x3, x4))
#define ADDR_FIX_5(x1, x2, x3, x4, x5) (ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5))
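
/* e.g. ADDR_FIX_2(1, 2) = (1 << 1) | (1 << 2) = 0x6, marking dwords 1
 * and 2 of a command as graphics addresses that need patching
 * (editorial annotation, not in the original source).
 */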
struct cmd_info {
	char *name;
	u32 opcode;

#define F_LEN_MASK (1U<<0)
#define F_LEN_CONST 1U
#define F_LEN_VAR 0U

/*
 * command has its own ip advance logic
 * e.g. MI_BATCH_START, MI_BATCH_END
 */
#define F_IP_ADVANCE_CUSTOM (1<<1)

#define F_POST_HANDLE (1<<2)
	u32 flag;

#define R_RCS (1 << RCS)
#define R_VCS1 (1 << VCS)
#define R_VCS2 (1 << VCS2)
#define R_VCS (R_VCS1 | R_VCS2)
#define R_BCS (1 << BCS)
#define R_VECS (1 << VECS)
#define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
	/* rings that support this cmd: BLT/RCS/VCS/VECS */
	uint16_t rings;

	/* devices that support this cmd: SNB/IVB/HSW/... */
	uint16_t devices;

	/* bitmap of the DWords that hold addresses needing fixup:
	 * a 0 bit marks a 32-bit non-address operand, while a 1 bit
	 * marks an address operand, which may be 32-bit or 64-bit
	 * depending on the architecture (see "gmadr_bytes_in_cmd"
	 * in struct intel_gvt). Regardless of the address length,
	 * each address takes only one bit in the bitmap.
	 */
	uint16_t addr_bitmap;

	/* flag == F_LEN_CONST : command length
	 * flag == F_LEN_VAR : length bias bits
	 * Note: length is in DWord
	 */
	uint8_t len;

	parser_cmd_handler handler;
};
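
/*
 * Illustrative table entry (editorial sketch, not part of this excerpt;
 * the actual table lives further down in the file, and D_ALL is assumed
 * here to be the all-devices mask from the GVT headers):
 *
 *	{"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL}
 *
 * i.e. a constant-length one-dword command, valid on every ring and
 * device, with no address operands and no post-scan handler.
 */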
struct cmd_entry {
	struct hlist_node hlist;
	struct cmd_info *info;
};

enum {
	RING_BUFFER_INSTRUCTION,
	BATCH_BUFFER_INSTRUCTION,
	BATCH_BUFFER_2ND_LEVEL,
};

enum {
	GTT_BUFFER,
	PPGTT_BUFFER
};

struct parser_exec_state {
	struct intel_vgpu *vgpu;
	int ring_id;

	int buf_type;

	/* batch buffer address type */
	int buf_addr_type;

	/* graphics memory address of ring buffer start */
	unsigned long ring_start;
	unsigned long ring_size;
	unsigned long ring_head;
	unsigned long ring_tail;

	/* instruction graphics memory address */
	unsigned long ip_gma;

	/* mapped va of the instr_gma */
	void *ip_va;
	void *rb_va;

	void *ret_bb_va;

	/* next instruction when return from batch buffer to ring buffer */
	unsigned long ret_ip_gma_ring;

	/* next instruction when return from 2nd batch buffer to batch buffer */
	unsigned long ret_ip_gma_bb;

	/* batch buffer address type (GTT or PPGTT)
	 * used when ret from 2nd level batch buffer
	 */
	int saved_buf_addr_type;

	struct cmd_info *info;

	struct intel_vgpu_workload *workload;
};
#define gmadr_dw_number(s) \
	(s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)

static unsigned long bypass_scan_mask = 0;

/* ring ALL, type = 0 */
static struct sub_op_bits sub_op_mi[] = {
	{31, 29},
	{28, 23},
};

static struct decode_info decode_info_mi = {
	"MI",
	OP_LEN_MI,
	ARRAY_SIZE(sub_op_mi),
	sub_op_mi,
};

/* ring RCS, command type 2 */
static struct sub_op_bits sub_op_2d[] = {
	{31, 29},
	{28, 22},
};

static struct decode_info decode_info_2d = {
	"2D",
	OP_LEN_2D,
	ARRAY_SIZE(sub_op_2d),
	sub_op_2d,
};

/* ring RCS, command type 3 */
static struct sub_op_bits sub_op_3d_media[] = {
	{31, 29},
	{28, 27},
	{26, 24},
	{23, 16},
};

static struct decode_info decode_info_3d_media = {
	"3D_Media",
	OP_LEN_3D_MEDIA,
	ARRAY_SIZE(sub_op_3d_media),
	sub_op_3d_media,
};

/* ring VCS, command type 3 */
static struct sub_op_bits sub_op_mfx_vc[] = {
	{31, 29},
	{28, 27},
	{26, 24},
	{23, 21},
	{20, 16},
};

static struct decode_info decode_info_mfx_vc = {
	"MFX_VC",
	OP_LEN_MFX_VC,
	ARRAY_SIZE(sub_op_mfx_vc),
	sub_op_mfx_vc,
};

/* ring VECS, command type 3 */
static struct sub_op_bits sub_op_vebox[] = {
	{31, 29},
	{28, 27},
	{26, 24},
	{23, 21},
	{20, 16},
};

static struct decode_info decode_info_vebox = {
	"VEBOX",
	OP_LEN_VEBOX,
	ARRAY_SIZE(sub_op_vebox),
	sub_op_vebox,
};
static struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
	[RCS] = {
		&decode_info_mi,
		NULL,
		NULL,
		&decode_info_3d_media,
		NULL,
		NULL,
		NULL,
		NULL,
	},

	[VCS] = {
		&decode_info_mi,
		NULL,
		NULL,
		&decode_info_mfx_vc,
		NULL,
		NULL,
		NULL,
		NULL,
	},

	[BCS] = {
		&decode_info_mi,
		NULL,
		&decode_info_2d,
		NULL,
		NULL,
		NULL,
		NULL,
		NULL,
	},

	[VECS] = {
		&decode_info_mi,
		NULL,
		NULL,
		&decode_info_vebox,
		NULL,
		NULL,
		NULL,
		NULL,
	},

	[VCS2] = {
		&decode_info_mi,
		NULL,
		NULL,
		&decode_info_mfx_vc,
		NULL,
		NULL,
		NULL,
		NULL,
	},
};
static inline u32 get_opcode(u32 cmd, int ring_id)
{
	struct decode_info *d_info;

	d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
	if (d_info == NULL)
		return INVALID_OP;

	return cmd >> (32 - d_info->op_len);
}

static inline struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
		unsigned int opcode, int ring_id)
{
	struct cmd_entry *e;

	hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
		if ((opcode == e->info->opcode) &&
		    (e->info->rings & (1 << ring_id)))
			return e->info;
	}
	return NULL;
}

static inline struct cmd_info *get_cmd_info(struct intel_gvt *gvt,
		u32 cmd, int ring_id)
{
	u32 opcode;

	opcode = get_opcode(cmd, ring_id);
	if (opcode == INVALID_OP)
		return NULL;

	return find_cmd_entry(gvt, opcode, ring_id);
}
static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
{
	return (cmd >> low) & ((1U << (hi - low + 1)) - 1);
}
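
/*
 * Worked example (editorial annotation, not in the original source):
 * for the MI_BATCH_BUFFER_START header 0x18800000,
 * sub_op_val(cmd, 28, 23) computes (0x18800000 >> 23) & 0x3f = 0x31,
 * recovering the opcode field that sub_op_mi[1] above describes.
 */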
static inline void print_opcode(u32 cmd, int ring_id)
{
	struct decode_info *d_info;
	int i;

	d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
	if (d_info == NULL)
		return;

	gvt_dbg_cmd("opcode=0x%x %s sub_ops:",
			cmd >> (32 - d_info->op_len), d_info->name);

	for (i = 0; i < d_info->nr_sub_op; i++)
		pr_err("0x%x ", sub_op_val(cmd, d_info->sub_op[i].hi,
					d_info->sub_op[i].low));

	pr_err("\n");
}
static inline u32 *cmd_ptr(struct parser_exec_state *s, int index)
{
	return s->ip_va + (index << 2);
}

static inline u32 cmd_val(struct parser_exec_state *s, int index)
{
	return *cmd_ptr(s, index);
}
static void parser_exec_state_dump(struct parser_exec_state *s)
{
	int cnt = 0;
	int i;

	gvt_dbg_cmd(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
			" ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id,
			s->ring_id, s->ring_start, s->ring_start + s->ring_size,
			s->ring_head, s->ring_tail);

	gvt_dbg_cmd(" %s %s ip_gma(%08lx) ",
			s->buf_type == RING_BUFFER_INSTRUCTION ?
			"RING_BUFFER" : "BATCH_BUFFER",
			s->buf_addr_type == GTT_BUFFER ?
			"GTT" : "PPGTT", s->ip_gma);

	if (s->ip_va == NULL) {
		gvt_dbg_cmd(" ip_va(NULL)");
		return;
	}

	gvt_dbg_cmd(" ip_va=%p: %08x %08x %08x %08x\n",
			s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
			cmd_val(s, 2), cmd_val(s, 3));

	print_opcode(cmd_val(s, 0), s->ring_id);

	/* print the whole page to trace */
	pr_err(" ip_va=%p: %08x %08x %08x %08x\n",
			s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
			cmd_val(s, 2), cmd_val(s, 3));

	s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12);

	while (cnt < 1024) {
		pr_err("ip_va=%p: ", s->ip_va);
		for (i = 0; i < 8; i++)
			pr_err("%08x ", cmd_val(s, i));
		pr_err("\n");

		s->ip_va += 8 * sizeof(u32);
		cnt += 8;
	}
}
static inline void update_ip_va(struct parser_exec_state *s)
{
	unsigned long len = 0;

	if (WARN_ON(s->ring_head == s->ring_tail))
		return;

	if (s->buf_type == RING_BUFFER_INSTRUCTION) {
		unsigned long ring_top = s->ring_start + s->ring_size;

		if (s->ring_head > s->ring_tail) {
			if (s->ip_gma >= s->ring_head && s->ip_gma < ring_top)
				len = (s->ip_gma - s->ring_head);
			else if (s->ip_gma >= s->ring_start &&
					s->ip_gma <= s->ring_tail)
				len = (ring_top - s->ring_head) +
					(s->ip_gma - s->ring_start);
		} else
			len = (s->ip_gma - s->ring_head);

		s->ip_va = s->rb_va + len;
	} else { /* shadow batch buffer */
		s->ip_va = s->ret_bb_va;
	}
}
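
/*
 * Example of the wrapped-ring case above (editorial annotation, not in
 * the original source): with ring_start = 0x1000, ring_size = 0x1000
 * (so ring_top = 0x2000), ring_head = 0x1f00 and ring_tail = 0x1100,
 * an ip_gma of 0x1040 falls in the wrapped region, so
 * len = (0x2000 - 0x1f00) + (0x1040 - 0x1000) = 0x140 and
 * ip_va = rb_va + 0x140.
 */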
static inline int ip_gma_set(struct parser_exec_state *s,
		unsigned long ip_gma)
{
	WARN_ON(!IS_ALIGNED(ip_gma, 4));

	s->ip_gma = ip_gma;
	update_ip_va(s);
	return 0;
}

static inline int ip_gma_advance(struct parser_exec_state *s,
		unsigned int dw_len)
{
	s->ip_gma += (dw_len << 2);

	if (s->buf_type == RING_BUFFER_INSTRUCTION) {
		if (s->ip_gma >= s->ring_start + s->ring_size)
			s->ip_gma -= s->ring_size;
		update_ip_va(s);
	} else {
		s->ip_va += (dw_len << 2);
	}
	return 0;
}
static inline int get_cmd_length(struct cmd_info *info, u32 cmd)
{
	if ((info->flag & F_LEN_MASK) == F_LEN_CONST)
		return info->len;
	else
		return (cmd & ((1U << info->len) - 1)) + 2;
}
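
/*
 * Worked example (editorial annotation, not in the original source;
 * the 8-bit length field comes from the command table entry, which is
 * outside this excerpt): for a variable-length MI_LOAD_REGISTER_IMM
 * whose header low byte is 0x03, the total size is
 * (0x03 & 0xff) + 2 = 5 dwords: the header plus two
 * register-offset/value pairs.
 */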
static inline int cmd_length(struct parser_exec_state *s)
{
	return get_cmd_length(s->info, cmd_val(s, 0));
}

/* do not remove this, some platform may need clflush here */
#define patch_value(s, addr, val) do { \
	*addr = val; \
} while (0)
static bool is_shadowed_mmio(unsigned int offset)
{
	bool ret = false;

	if ((offset == 0x2168) || /* BB current head register UDW */
	    (offset == 0x2140) || /* BB current header register */
	    (offset == 0x211c) || /* second BB header register UDW */
	    (offset == 0x2114)) { /* second BB header register */
		ret = true;
	}
	return ret;
}

static inline bool is_force_nonpriv_mmio(unsigned int offset)
{
	return (offset >= 0x24d0 && offset < 0x2500);
}
static int force_nonpriv_reg_handler(struct parser_exec_state *s,
		unsigned int offset, unsigned int index)
{
	struct intel_gvt *gvt = s->vgpu->gvt;
	unsigned int data = cmd_val(s, index + 1);

	if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data)) {
		gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
			offset, data);
		return -EINVAL;
	}
	return 0;
}
static int cmd_reg_handler(struct parser_exec_state *s,
		unsigned int offset, unsigned int index, char *cmd)
{
	struct intel_vgpu *vgpu = s->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;

	if (offset + 4 > gvt->device_info.mmio_size) {
		gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
				cmd, offset);
		return -EINVAL;
	}

	if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
		gvt_vgpu_err("%s access to non-render register (%x)\n",
				cmd, offset);
		return 0;
	}

	if (is_shadowed_mmio(offset)) {
		gvt_vgpu_err("found access of shadowed MMIO %x\n", offset);
		return 0;
	}

	if (is_force_nonpriv_mmio(offset) &&
	    force_nonpriv_reg_handler(s, offset, index))
		return -EINVAL;

	if (offset == i915_mmio_reg_offset(DERRMR) ||
	    offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
		/* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
		patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
	}

	/* TODO: Update the global mask if this MMIO is a masked-MMIO */
	intel_gvt_mmio_set_cmd_accessed(gvt, offset);
	return 0;
}
#define cmd_reg(s, i) \
	(cmd_val(s, i) & GENMASK(22, 2))

#define cmd_reg_inhibit(s, i) \
	(cmd_val(s, i) & GENMASK(22, 18))

#define cmd_gma(s, i) \
	(cmd_val(s, i) & GENMASK(31, 2))

#define cmd_gma_hi(s, i) \
	(cmd_val(s, i) & GENMASK(15, 0))
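
/*
 * Field layout note (editorial annotation, not in the original source):
 * in LRI/LRM/SRM payloads the register offset sits in bits 22:2 and is
 * already byte-addressed, so cmd_reg() only masks with
 * GENMASK(22, 2) = 0x007ffffc and never shifts; cmd_gma() likewise
 * keeps the dword-aligned low address bits 31:2 in place, and
 * cmd_gma_hi() takes the upper 16 address bits.
 */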
static int cmd_handler_lri(struct parser_exec_state *s)
{
	int i, ret = 0;
	int cmd_len = cmd_length(s);
	struct intel_gvt *gvt = s->vgpu->gvt;

	for (i = 1; i < cmd_len; i += 2) {
		if (IS_BROADWELL(gvt->dev_priv) &&
		    (s->ring_id != RCS)) {
			if (s->ring_id == BCS &&
			    cmd_reg(s, i) == i915_mmio_reg_offset(DERRMR))
				ret |= 0;
			else
				ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
		}
		if (ret)
			break;
		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri");
	}
	return ret;
}
static int cmd_handler_lrr(struct parser_exec_state *s)
{
	int i, ret = 0;
	int cmd_len = cmd_length(s);

	for (i = 1; i < cmd_len; i += 2) {
		if (IS_BROADWELL(s->vgpu->gvt->dev_priv))
			ret |= ((cmd_reg_inhibit(s, i) ||
				 (cmd_reg_inhibit(s, i + 1)))) ?
				-EINVAL : 0;
		if (ret)
			break;
		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrr-src");
		ret |= cmd_reg_handler(s, cmd_reg(s, i + 1), i, "lrr-dst");
	}
	return ret;
}
static inline int cmd_address_audit(struct parser_exec_state *s,
		unsigned long guest_gma, int op_size, bool index_mode);

static int cmd_handler_lrm(struct parser_exec_state *s)
{
	struct intel_gvt *gvt = s->vgpu->gvt;
	int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
	unsigned long gma;
	int i, ret = 0;
	int cmd_len = cmd_length(s);

	for (i = 1; i < cmd_len;) {
		if (IS_BROADWELL(gvt->dev_priv))
			ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
		if (ret)
			break;
		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrm");
		if (cmd_val(s, 0) & (1 << 22)) {
			gma = cmd_gma(s, i + 1);
			if (gmadr_bytes == 8)
				gma |= (cmd_gma_hi(s, i + 2)) << 32;
			ret |= cmd_address_audit(s, gma, sizeof(u32), false);
		}
		i += gmadr_dw_number(s) + 1;
	}
	return ret;
}
static int cmd_handler_srm(struct parser_exec_state *s)
{
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	unsigned long gma;
	int i, ret = 0;
	int cmd_len = cmd_length(s);

	for (i = 1; i < cmd_len;) {
		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "srm");
		if (cmd_val(s, 0) & (1 << 22)) {
			gma = cmd_gma(s, i + 1);
			if (gmadr_bytes == 8)
				gma |= (cmd_gma_hi(s, i + 2)) << 32;
			ret |= cmd_address_audit(s, gma, sizeof(u32), false);
		}
		i += gmadr_dw_number(s) + 1;
	}
	return ret;
}
struct cmd_interrupt_event {
	int pipe_control_notify;
	int mi_flush_dw;
	int mi_user_interrupt;
};

static struct cmd_interrupt_event cmd_interrupt_events[] = {
	[RCS] = {
		.pipe_control_notify = RCS_PIPE_CONTROL,
		.mi_flush_dw = INTEL_GVT_EVENT_RESERVED,
		.mi_user_interrupt = RCS_MI_USER_INTERRUPT,
	},
	[BCS] = {
		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
		.mi_flush_dw = BCS_MI_FLUSH_DW,
		.mi_user_interrupt = BCS_MI_USER_INTERRUPT,
	},
	[VCS] = {
		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
		.mi_flush_dw = VCS_MI_FLUSH_DW,
		.mi_user_interrupt = VCS_MI_USER_INTERRUPT,
	},
	[VCS2] = {
		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
		.mi_flush_dw = VCS2_MI_FLUSH_DW,
		.mi_user_interrupt = VCS2_MI_USER_INTERRUPT,
	},
	[VECS] = {
		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
		.mi_flush_dw = VECS_MI_FLUSH_DW,
		.mi_user_interrupt = VECS_MI_USER_INTERRUPT,
	},
};
static int cmd_handler_pipe_control(struct parser_exec_state *s)
{
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	unsigned long gma;
	bool index_mode = false;
	unsigned int post_sync;
	int ret = 0;

	post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14;

	/* LRI post sync */
	if (cmd_val(s, 1) & PIPE_CONTROL_MMIO_WRITE)
		ret = cmd_reg_handler(s, cmd_reg(s, 2), 1, "pipe_ctrl");
	/* post sync */
	else if (post_sync) {
		if (post_sync == 2)
			ret = cmd_reg_handler(s, 0x2350, 1, "pipe_ctrl");
		else if (post_sync == 3)
			ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl");
		else if (post_sync == 1) {
			/* check ggtt */
			if ((cmd_val(s, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB)) {
				gma = cmd_val(s, 2) & GENMASK(31, 3);
				if (gmadr_bytes == 8)
					gma |= (cmd_gma_hi(s, 3)) << 32;
				/* Store Data Index */
				if (cmd_val(s, 1) & (1 << 21))
					index_mode = true;
				ret |= cmd_address_audit(s, gma, sizeof(u64),
						index_mode);
			}
		}
	}

	if (ret)
		return ret;

	if (cmd_val(s, 1) & PIPE_CONTROL_NOTIFY)
		set_bit(cmd_interrupt_events[s->ring_id].pipe_control_notify,
				s->workload->pending_events);
	return 0;
}
static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
{
	set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt,
			s->workload->pending_events);
	return 0;
}

static int cmd_advance_default(struct parser_exec_state *s)
{
	return ip_gma_advance(s, cmd_length(s));
}

static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state *s)
{
	int ret;

	if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
		s->buf_type = BATCH_BUFFER_INSTRUCTION;
		ret = ip_gma_set(s, s->ret_ip_gma_bb);
		s->buf_addr_type = s->saved_buf_addr_type;
	} else {
		s->buf_type = RING_BUFFER_INSTRUCTION;
		s->buf_addr_type = GTT_BUFFER;
		if (s->ret_ip_gma_ring >= s->ring_start + s->ring_size)
			s->ret_ip_gma_ring -= s->ring_size;
		ret = ip_gma_set(s, s->ret_ip_gma_ring);
	}
	return ret;
}
struct mi_display_flip_command_info {
	int pipe;
	int plane;
	int event;
	i915_reg_t stride_reg;
	i915_reg_t ctrl_reg;
	i915_reg_t surf_reg;
	u64 stride_val;
	u64 tile_val;
	u64 surf_val;
	bool async_flip;
};

struct plane_code_mapping {
	int pipe;
	int plane;
	int event;
};
static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
	struct plane_code_mapping gen8_plane_code[] = {
		[0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
		[1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
		[2] = {PIPE_A, PLANE_B, SPRITE_A_FLIP_DONE},
		[3] = {PIPE_B, PLANE_B, SPRITE_B_FLIP_DONE},
		[4] = {PIPE_C, PLANE_A, PRIMARY_C_FLIP_DONE},
		[5] = {PIPE_C, PLANE_B, SPRITE_C_FLIP_DONE},
	};
	u32 dword0, dword1, dword2;
	u32 v;

	dword0 = cmd_val(s, 0);
	dword1 = cmd_val(s, 1);
	dword2 = cmd_val(s, 2);

	v = (dword0 & GENMASK(21, 19)) >> 19;
	if (WARN_ON(v >= ARRAY_SIZE(gen8_plane_code)))
		return -EINVAL;

	info->pipe = gen8_plane_code[v].pipe;
	info->plane = gen8_plane_code[v].plane;
	info->event = gen8_plane_code[v].event;
	info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
	info->tile_val = (dword1 & 0x1);
	info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
	info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);

	if (info->plane == PLANE_A) {
		info->ctrl_reg = DSPCNTR(info->pipe);
		info->stride_reg = DSPSTRIDE(info->pipe);
		info->surf_reg = DSPSURF(info->pipe);
	} else if (info->plane == PLANE_B) {
		info->ctrl_reg = SPRCTL(info->pipe);
		info->stride_reg = SPRSTRIDE(info->pipe);
		info->surf_reg = SPRSURF(info->pipe);
	} else {
		WARN_ON(1);
		return -EINVAL;
	}
	return 0;
}
static int skl_decode_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
	struct intel_vgpu *vgpu = s->vgpu;
	u32 dword0 = cmd_val(s, 0);
	u32 dword1 = cmd_val(s, 1);
	u32 dword2 = cmd_val(s, 2);
	u32 plane = (dword0 & GENMASK(12, 8)) >> 8;

	info->plane = PRIMARY_PLANE;

	switch (plane) {
	case MI_DISPLAY_FLIP_SKL_PLANE_1_A:
		info->pipe = PIPE_A;
		info->event = PRIMARY_A_FLIP_DONE;
		break;
	case MI_DISPLAY_FLIP_SKL_PLANE_1_B:
		info->pipe = PIPE_B;
		info->event = PRIMARY_B_FLIP_DONE;
		break;
	case MI_DISPLAY_FLIP_SKL_PLANE_1_C:
		info->pipe = PIPE_C;
		info->event = PRIMARY_C_FLIP_DONE;
		break;
	case MI_DISPLAY_FLIP_SKL_PLANE_2_A:
		info->pipe = PIPE_A;
		info->event = SPRITE_A_FLIP_DONE;
		info->plane = SPRITE_PLANE;
		break;
	case MI_DISPLAY_FLIP_SKL_PLANE_2_B:
		info->pipe = PIPE_B;
		info->event = SPRITE_B_FLIP_DONE;
		info->plane = SPRITE_PLANE;
		break;
	case MI_DISPLAY_FLIP_SKL_PLANE_2_C:
		info->pipe = PIPE_C;
		info->event = SPRITE_C_FLIP_DONE;
		info->plane = SPRITE_PLANE;
		break;
	default:
		gvt_vgpu_err("unknown plane code %d\n", plane);
		return -EINVAL;
	}

	info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
	info->tile_val = (dword1 & GENMASK(2, 0));
	info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
	info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);

	info->ctrl_reg = DSPCNTR(info->pipe);
	info->stride_reg = DSPSTRIDE(info->pipe);
	info->surf_reg = DSPSURF(info->pipe);

	return 0;
}
static int gen8_check_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
	u32 stride, tile;

	if (!info->async_flip)
		return 0;

	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		stride = vgpu_vreg(s->vgpu, info->stride_reg) & GENMASK(9, 0);
		tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) &
				GENMASK(12, 10)) >> 10;
	} else {
		stride = (vgpu_vreg(s->vgpu, info->stride_reg) &
				GENMASK(15, 6)) >> 6;
		tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10;
	}

	if (stride != info->stride_val)
		gvt_dbg_cmd("cannot change stride during async flip\n");

	if (tile != info->tile_val)
		gvt_dbg_cmd("cannot change tile during async flip\n");

	return 0;
}
static int gen8_update_plane_mmio_from_mi_display_flip(
		struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
	struct intel_vgpu *vgpu = s->vgpu;

	set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12),
		      info->surf_val << 12);
	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0),
			      info->stride_val);
		set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10),
			      info->tile_val << 10);
	} else {
		set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(15, 6),
			      info->stride_val << 6);
		set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(10, 10),
			      info->tile_val << 10);
	}

	vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
	intel_vgpu_trigger_virtual_event(vgpu, info->event);
	return 0;
}
static int decode_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;

	if (IS_BROADWELL(dev_priv))
		return gen8_decode_mi_display_flip(s, info);
	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		return skl_decode_mi_display_flip(s, info);

	return -ENODEV;
}

static int check_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;

	if (IS_BROADWELL(dev_priv)
	    || IS_SKYLAKE(dev_priv)
	    || IS_KABYLAKE(dev_priv))
		return gen8_check_mi_display_flip(s, info);
	return -ENODEV;
}

static int update_plane_mmio_from_mi_display_flip(
		struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;

	if (IS_BROADWELL(dev_priv)
	    || IS_SKYLAKE(dev_priv)
	    || IS_KABYLAKE(dev_priv))
		return gen8_update_plane_mmio_from_mi_display_flip(s, info);
	return -ENODEV;
}
static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
{
	struct mi_display_flip_command_info info;
	struct intel_vgpu *vgpu = s->vgpu;
	int ret;
	int i;
	int len = cmd_length(s);

	ret = decode_mi_display_flip(s, &info);
	if (ret) {
		gvt_vgpu_err("fail to decode MI display flip command\n");
		return ret;
	}

	ret = check_mi_display_flip(s, &info);
	if (ret) {
		gvt_vgpu_err("invalid MI display flip command\n");
		return ret;
	}

	ret = update_plane_mmio_from_mi_display_flip(s, &info);
	if (ret) {
		gvt_vgpu_err("fail to update plane mmio\n");
		return ret;
	}

	for (i = 0; i < len; i++)
		patch_value(s, cmd_ptr(s, i), MI_NOOP);
	return 0;
}
  1133. static bool is_wait_for_flip_pending(u32 cmd)
  1134. {
  1135. return cmd & (MI_WAIT_FOR_PLANE_A_FLIP_PENDING |
  1136. MI_WAIT_FOR_PLANE_B_FLIP_PENDING |
  1137. MI_WAIT_FOR_PLANE_C_FLIP_PENDING |
  1138. MI_WAIT_FOR_SPRITE_A_FLIP_PENDING |
  1139. MI_WAIT_FOR_SPRITE_B_FLIP_PENDING |
  1140. MI_WAIT_FOR_SPRITE_C_FLIP_PENDING);
  1141. }
static int cmd_handler_mi_wait_for_event(struct parser_exec_state *s)
{
	u32 cmd = cmd_val(s, 0);

	if (!is_wait_for_flip_pending(cmd))
		return 0;

	patch_value(s, cmd_ptr(s, 0), MI_NOOP);
	return 0;
}
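
/*
 * Commands embed graphics memory addresses as either one or two dwords,
 * depending on the platform's gmadr_bytes_in_cmd (4 or 8); with 8, the
 * following dword carries the high bits of the address.
 */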
static unsigned long get_gma_bb_from_cmd(struct parser_exec_state *s, int index)
{
	unsigned long addr;
	unsigned long gma_high, gma_low;
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;

	if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
		return INTEL_GVT_INVALID_ADDR;

	gma_low = cmd_val(s, index) & BATCH_BUFFER_ADDR_MASK;
	if (gmadr_bytes == 4) {
		addr = gma_low;
	} else {
		gma_high = cmd_val(s, index + 1) & BATCH_BUFFER_ADDR_HIGH_MASK;
		addr = (((unsigned long)gma_high) << 32) | gma_low;
	}
	return addr;
}
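
/*
 * Audit a guest address found in a command. In index mode the value is
 * an index into a single page of u64 slots rather than a GMA, so it only
 * has to fit within that page; otherwise the whole [gma, gma + op_size)
 * range must fall inside the vGPU's aperture or hidden GM space.
 */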
static inline int cmd_address_audit(struct parser_exec_state *s,
		unsigned long guest_gma, int op_size, bool index_mode)
{
	struct intel_vgpu *vgpu = s->vgpu;
	u32 max_surface_size = vgpu->gvt->device_info.max_surface_size;
	int i;
	int ret;

	if (op_size > max_surface_size) {
		gvt_vgpu_err("command address audit fail name %s\n",
			s->info->name);
		return -EINVAL;
	}

	if (index_mode) {
		if (guest_gma >= GTT_PAGE_SIZE / sizeof(u64)) {
			ret = -EINVAL;
			goto err;
		}
	} else if ((!vgpu_gmadr_is_valid(s->vgpu, guest_gma)) ||
			(!vgpu_gmadr_is_valid(s->vgpu,
				guest_gma + op_size - 1))) {
		ret = -EINVAL;
		goto err;
	}
	return 0;

err:
	gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
			s->info->name, guest_gma, op_size);

	pr_err("cmd dump: ");
	for (i = 0; i < cmd_length(s); i++) {
		if (!(i % 4))
			pr_err("\n%08x ", cmd_val(s, i));
		else
			pr_err("%08x ", cmd_val(s, i));
	}
	pr_err("\nvgpu%d: aperture 0x%llx - 0x%llx, hidden 0x%llx - 0x%llx\n",
			vgpu->id,
			vgpu_aperture_gmadr_base(vgpu),
			vgpu_aperture_gmadr_end(vgpu),
			vgpu_hidden_gmadr_base(vgpu),
			vgpu_hidden_gmadr_end(vgpu));
	return ret;
}
static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
{
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	int op_size = (cmd_length(s) - 3) * sizeof(u32);
	int core_id = (cmd_val(s, 2) & (1 << 0)) ? 1 : 0;
	unsigned long gma, gma_low, gma_high;
	int ret = 0;

	/* check ppgtt */
	if (!(cmd_val(s, 0) & (1 << 22)))
		return 0;

	gma = cmd_val(s, 2) & GENMASK(31, 2);

	if (gmadr_bytes == 8) {
		gma_low = cmd_val(s, 1) & GENMASK(31, 2);
		gma_high = cmd_val(s, 2) & GENMASK(15, 0);
		gma = (gma_high << 32) | gma_low;
		core_id = (cmd_val(s, 1) & (1 << 0)) ? 1 : 0;
	}
	ret = cmd_address_audit(s, gma + op_size * core_id, op_size, false);
	return ret;
}
static inline int unexpected_cmd(struct parser_exec_state *s)
{
	struct intel_vgpu *vgpu = s->vgpu;

	gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);
	return -EINVAL;
}
static int cmd_handler_mi_semaphore_wait(struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_report_perf_count(struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_op_2e(struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_op_2f(struct parser_exec_state *s)
{
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) *
			sizeof(u32);
	unsigned long gma, gma_high;
	int ret = 0;

	if (!(cmd_val(s, 0) & (1 << 22)))
		return ret;

	gma = cmd_val(s, 1) & GENMASK(31, 2);
	if (gmadr_bytes == 8) {
		gma_high = cmd_val(s, 2) & GENMASK(15, 0);
		gma = (gma_high << 32) | gma;
	}
	ret = cmd_address_audit(s, gma, op_size, false);
	return ret;
}

static int cmd_handler_mi_store_data_index(struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_clflush(struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_conditional_batch_buffer_end(
		struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_update_gtt(struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}
static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
{
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	unsigned long gma;
	bool index_mode = false;
	int ret = 0;

	/* Check post-sync and ppgtt bit */
	if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) {
		gma = cmd_val(s, 1) & GENMASK(31, 3);
		if (gmadr_bytes == 8)
			/* widen before shifting: cmd_val() returns u32 */
			gma |= ((unsigned long)(cmd_val(s, 2) &
					GENMASK(15, 0))) << 32;
		/* Store Data Index */
		if (cmd_val(s, 0) & (1 << 21))
			index_mode = true;
		ret = cmd_address_audit(s, gma, sizeof(u64), index_mode);
	}
	/* Check notify bit */
	if ((cmd_val(s, 0) & (1 << 8)))
		set_bit(cmd_interrupt_events[s->ring_id].mi_flush_dw,
				s->workload->pending_events);
	return ret;
}
static void addr_type_update_snb(struct parser_exec_state *s)
{
	if ((s->buf_type == RING_BUFFER_INSTRUCTION) &&
			(BATCH_BUFFER_ADR_SPACE_BIT(cmd_val(s, 0)) == 1)) {
		s->buf_addr_type = PPGTT_BUFFER;
	}
}
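
/*
 * Copy [gma, end_gma) from guest graphics memory into a host buffer.
 * The range may span several GTT pages, so each page is translated to a
 * guest physical address and read separately. Returns the number of
 * bytes copied, or -EFAULT on a translation failure.
 */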
static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
		unsigned long gma, unsigned long end_gma, void *va)
{
	unsigned long copy_len, offset;
	unsigned long len = 0;
	unsigned long gpa;

	while (gma != end_gma) {
		gpa = intel_vgpu_gma_to_gpa(mm, gma);
		if (gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("invalid gma address: %lx\n", gma);
			return -EFAULT;
		}

		offset = gma & (GTT_PAGE_SIZE - 1);

		copy_len = (end_gma - gma) >= (GTT_PAGE_SIZE - offset) ?
			GTT_PAGE_SIZE - offset : end_gma - gma;

		intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len);

		len += copy_len;
		gma += copy_len;
	}
	return len;
}
/*
 * Check whether a batch buffer needs to be scanned. Currently
 * the only criterion is privilege.
 */
static int batch_buffer_needs_scan(struct parser_exec_state *s)
{
	struct intel_gvt *gvt = s->vgpu->gvt;

	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
		|| IS_KABYLAKE(gvt->dev_priv)) {
		/* BDW+ decides privilege based on address space */
		if (cmd_val(s, 0) & (1 << 8))
			return 0;
	}
	return 1;
}
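
/*
 * Walk the guest batch buffer command by command, summing each command's
 * length, until MI_BATCH_BUFFER_END or a chained (non-second-level)
 * MI_BATCH_BUFFER_START terminates the buffer. The result is the number
 * of bytes that must be shadowed.
 */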
/* returns the batch buffer size in bytes, or -EINVAL on an unknown command */
static int find_bb_size(struct parser_exec_state *s)
{
	unsigned long gma = 0;
	struct cmd_info *info;
	uint32_t bb_size = 0;
	uint32_t cmd_len = 0;
	bool met_bb_end = false;
	struct intel_vgpu *vgpu = s->vgpu;
	u32 cmd;

	/* get the start gm address of the batch buffer */
	gma = get_gma_bb_from_cmd(s, 1);
	cmd = cmd_val(s, 0);

	info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
	if (info == NULL) {
		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
				cmd, get_opcode(cmd, s->ring_id));
		return -EINVAL;
	}
	do {
		copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
				gma, gma + 4, &cmd);
		info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
		if (info == NULL) {
			gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
					cmd, get_opcode(cmd, s->ring_id));
			return -EINVAL;
		}

		if (info->opcode == OP_MI_BATCH_BUFFER_END) {
			met_bb_end = true;
		} else if (info->opcode == OP_MI_BATCH_BUFFER_START) {
			if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0) {
				/* chained batch buffer */
				met_bb_end = true;
			}
		}
		cmd_len = get_cmd_length(info, cmd) << 2;
		bb_size += cmd_len;
		gma += cmd_len;
	} while (!met_bb_end);

	return bb_size;
}
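
/*
 * Shadow the guest batch buffer: allocate a GEM object large enough for
 * the batch, copy the guest contents into it, and redirect the parser so
 * that scanning continues inside the shadow copy.
 */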
static int perform_bb_shadow(struct parser_exec_state *s)
{
	struct intel_shadow_bb_entry *entry_obj;
	struct intel_vgpu *vgpu = s->vgpu;
	unsigned long gma = 0;
	int bb_size;
	void *dst = NULL;
	int ret = 0;

	/* get the start gm address of the batch buffer */
	gma = get_gma_bb_from_cmd(s, 1);
	/* get the size of the batch buffer */
	bb_size = find_bb_size(s);
	if (bb_size < 0)
		return -EINVAL;

	/* allocate shadow batch buffer */
	entry_obj = kmalloc(sizeof(*entry_obj), GFP_KERNEL);
	if (entry_obj == NULL)
		return -ENOMEM;

	entry_obj->obj =
		i915_gem_object_create(s->vgpu->gvt->dev_priv,
				       roundup(bb_size, PAGE_SIZE));
	if (IS_ERR(entry_obj->obj)) {
		ret = PTR_ERR(entry_obj->obj);
		goto free_entry;
	}
	entry_obj->len = bb_size;
	INIT_LIST_HEAD(&entry_obj->list);

	dst = i915_gem_object_pin_map(entry_obj->obj, I915_MAP_WB);
	if (IS_ERR(dst)) {
		ret = PTR_ERR(dst);
		goto put_obj;
	}

	ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
	if (ret) {
		gvt_vgpu_err("failed to set shadow batch to CPU\n");
		goto unmap_src;
	}

	entry_obj->va = dst;
	entry_obj->bb_start_cmd_va = s->ip_va;

	/* copy batch buffer to shadow batch buffer */
	ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
			      gma, gma + bb_size,
			      dst);
	if (ret < 0) {
		gvt_vgpu_err("fail to copy guest batch buffer\n");
		goto unmap_src;
	}

	list_add(&entry_obj->list, &s->workload->shadow_bb);
	/*
	 * ip_va saves the virtual address of the shadow batch buffer, while
	 * ip_gma saves the graphics address of the original batch buffer.
	 * As the shadow batch buffer is just a copy from the original one,
	 * it should be right to use the shadow batch buffer's va and the
	 * original batch buffer's gma in pair. After all, we don't want to
	 * pin the shadow buffer here (too early).
	 */
	s->ip_va = dst;
	s->ip_gma = gma;
	return 0;

unmap_src:
	i915_gem_object_unpin_map(entry_obj->obj);
put_obj:
	i915_gem_object_put(entry_obj->obj);
free_entry:
	kfree(entry_obj);
	return ret;
}
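
/*
 * MI_BATCH_BUFFER_START either jumps from the ring into a first-level
 * batch or, with the second-level bit set, from a first-level batch into
 * a second-level one; nesting any deeper is rejected. The return address
 * is saved so that MI_BATCH_BUFFER_END can resume at the right spot.
 */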
static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
{
	bool second_level;
	int ret = 0;
	struct intel_vgpu *vgpu = s->vgpu;

	if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
		gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
		return -EINVAL;
	}

	second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
	if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
		gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
		return -EINVAL;
	}

	s->saved_buf_addr_type = s->buf_addr_type;
	addr_type_update_snb(s);
	if (s->buf_type == RING_BUFFER_INSTRUCTION) {
		s->ret_ip_gma_ring = s->ip_gma + cmd_length(s) * sizeof(u32);
		s->buf_type = BATCH_BUFFER_INSTRUCTION;
	} else if (second_level) {
		s->buf_type = BATCH_BUFFER_2ND_LEVEL;
		s->ret_ip_gma_bb = s->ip_gma + cmd_length(s) * sizeof(u32);
		s->ret_bb_va = s->ip_va + cmd_length(s) * sizeof(u32);
	}

	if (batch_buffer_needs_scan(s)) {
		ret = perform_bb_shadow(s);
		if (ret < 0)
			gvt_vgpu_err("invalid shadow batch buffer\n");
	} else {
		/* emulate a batch buffer end to do return right */
		ret = cmd_handler_mi_batch_buffer_end(s);
		if (ret < 0)
			return ret;
	}
	return ret;
}
static struct cmd_info cmd_info[] = {
	{"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
	{"MI_SET_PREDICATE", OP_MI_SET_PREDICATE, F_LEN_CONST, R_ALL, D_ALL,
		0, 1, NULL},
	{"MI_USER_INTERRUPT", OP_MI_USER_INTERRUPT, F_LEN_CONST, R_ALL, D_ALL,
		0, 1, cmd_handler_mi_user_interrupt},
	{"MI_WAIT_FOR_EVENT", OP_MI_WAIT_FOR_EVENT, F_LEN_CONST, R_RCS | R_BCS,
		D_ALL, 0, 1, cmd_handler_mi_wait_for_event},
	{"MI_FLUSH", OP_MI_FLUSH, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
	{"MI_ARB_CHECK", OP_MI_ARB_CHECK, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
		NULL},
	{"MI_RS_CONTROL", OP_MI_RS_CONTROL, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
		NULL},
	{"MI_REPORT_HEAD", OP_MI_REPORT_HEAD, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
		NULL},
	{"MI_ARB_ON_OFF", OP_MI_ARB_ON_OFF, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
		NULL},
	{"MI_URB_ATOMIC_ALLOC", OP_MI_URB_ATOMIC_ALLOC, F_LEN_CONST, R_RCS,
		D_ALL, 0, 1, NULL},
	{"MI_BATCH_BUFFER_END", OP_MI_BATCH_BUFFER_END,
		F_IP_ADVANCE_CUSTOM | F_LEN_CONST, R_ALL, D_ALL, 0, 1,
		cmd_handler_mi_batch_buffer_end},
	{"MI_SUSPEND_FLUSH", OP_MI_SUSPEND_FLUSH, F_LEN_CONST, R_ALL, D_ALL,
		0, 1, NULL},
	{"MI_PREDICATE", OP_MI_PREDICATE, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
		NULL},
	{"MI_TOPOLOGY_FILTER", OP_MI_TOPOLOGY_FILTER, F_LEN_CONST, R_ALL,
		D_ALL, 0, 1, NULL},
	{"MI_SET_APPID", OP_MI_SET_APPID, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
		NULL},
	{"MI_RS_CONTEXT", OP_MI_RS_CONTEXT, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
		NULL},
	{"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR | F_POST_HANDLE,
		R_RCS | R_BCS, D_ALL, 0, 8, cmd_handler_mi_display_flip},
	{"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR, R_ALL, D_ALL,
		0, 8, NULL},
	{"MI_MATH", OP_MI_MATH, F_LEN_VAR, R_ALL, D_ALL, 0, 8, NULL},
	{"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL,
		D_BDW_PLUS, 0, 8, NULL},
	{"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL, D_BDW_PLUS,
		ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait},
	{"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS,
		ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm},
	{"MI_STORE_DATA_INDEX", OP_MI_STORE_DATA_INDEX, F_LEN_VAR, R_ALL, D_ALL,
		0, 8, cmd_handler_mi_store_data_index},
	{"MI_LOAD_REGISTER_IMM", OP_MI_LOAD_REGISTER_IMM, F_LEN_VAR, R_ALL,
		D_ALL, 0, 8, cmd_handler_lri},
	{"MI_UPDATE_GTT", OP_MI_UPDATE_GTT, F_LEN_VAR, R_ALL, D_BDW_PLUS, 0, 10,
		cmd_handler_mi_update_gtt},
	{"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM, F_LEN_VAR, R_ALL,
		D_ALL, ADDR_FIX_1(2), 8, cmd_handler_srm},
	{"MI_FLUSH_DW", OP_MI_FLUSH_DW, F_LEN_VAR, R_ALL, D_ALL, 0, 6,
		cmd_handler_mi_flush_dw},
	{"MI_CLFLUSH", OP_MI_CLFLUSH, F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(1),
		10, cmd_handler_mi_clflush},
	{"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT, F_LEN_VAR, R_ALL,
		D_ALL, ADDR_FIX_1(1), 6, cmd_handler_mi_report_perf_count},
	{"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM, F_LEN_VAR, R_ALL,
		D_ALL, ADDR_FIX_1(2), 8, cmd_handler_lrm},
	{"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG, F_LEN_VAR, R_ALL,
		D_ALL, 0, 8, cmd_handler_lrr},
	{"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM, F_LEN_VAR, R_RCS,
		D_ALL, 0, 8, NULL},
	{"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR, R_RCS, D_ALL,
		ADDR_FIX_1(2), 8, NULL},
	{"MI_STORE_URM_MEM", OP_MI_STORE_URM_MEM, F_LEN_VAR, R_RCS, D_ALL,
		ADDR_FIX_1(2), 8, NULL},
	{"MI_OP_2E", OP_MI_2E, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_2(1, 2),
		8, cmd_handler_mi_op_2e},
	{"MI_OP_2F", OP_MI_2F, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_1(1),
		8, cmd_handler_mi_op_2f},
	{"MI_BATCH_BUFFER_START", OP_MI_BATCH_BUFFER_START,
		F_IP_ADVANCE_CUSTOM, R_ALL, D_ALL, 0, 8,
		cmd_handler_mi_batch_buffer_start},
	{"MI_CONDITIONAL_BATCH_BUFFER_END", OP_MI_CONDITIONAL_BATCH_BUFFER_END,
		F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
		cmd_handler_mi_conditional_batch_buffer_end},
	{"MI_LOAD_SCAN_LINES_INCL", OP_MI_LOAD_SCAN_LINES_INCL, F_LEN_CONST,
		R_RCS | R_BCS, D_ALL, 0, 2, NULL},
	{"XY_SETUP_BLT", OP_XY_SETUP_BLT, F_LEN_VAR, R_BCS, D_ALL,
		ADDR_FIX_2(4, 7), 8, NULL},
	{"XY_SETUP_CLIP_BLT", OP_XY_SETUP_CLIP_BLT, F_LEN_VAR, R_BCS, D_ALL,
		0, 8, NULL},
	{"XY_SETUP_MONO_PATTERN_SL_BLT", OP_XY_SETUP_MONO_PATTERN_SL_BLT,
		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
	{"XY_PIXEL_BLT", OP_XY_PIXEL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
	{"XY_SCANLINES_BLT", OP_XY_SCANLINES_BLT, F_LEN_VAR, R_BCS, D_ALL,
		0, 8, NULL},
	{"XY_TEXT_BLT", OP_XY_TEXT_BLT, F_LEN_VAR, R_BCS, D_ALL,
		ADDR_FIX_1(3), 8, NULL},
	{"XY_TEXT_IMMEDIATE_BLT", OP_XY_TEXT_IMMEDIATE_BLT, F_LEN_VAR, R_BCS,
		D_ALL, 0, 8, NULL},
	{"XY_COLOR_BLT", OP_XY_COLOR_BLT, F_LEN_VAR, R_BCS, D_ALL,
		ADDR_FIX_1(4), 8, NULL},
	{"XY_PAT_BLT", OP_XY_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
		ADDR_FIX_2(4, 5), 8, NULL},
	{"XY_MONO_PAT_BLT", OP_XY_MONO_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
		ADDR_FIX_1(4), 8, NULL},
	{"XY_SRC_COPY_BLT", OP_XY_SRC_COPY_BLT, F_LEN_VAR, R_BCS, D_ALL,
		ADDR_FIX_2(4, 7), 8, NULL},
	{"XY_MONO_SRC_COPY_BLT", OP_XY_MONO_SRC_COPY_BLT, F_LEN_VAR, R_BCS,
		D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
	{"XY_FULL_BLT", OP_XY_FULL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
	{"XY_FULL_MONO_SRC_BLT", OP_XY_FULL_MONO_SRC_BLT, F_LEN_VAR, R_BCS,
		D_ALL, ADDR_FIX_3(4, 5, 8), 8, NULL},
	{"XY_FULL_MONO_PATTERN_BLT", OP_XY_FULL_MONO_PATTERN_BLT, F_LEN_VAR,
		R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
	{"XY_FULL_MONO_PATTERN_MONO_SRC_BLT",
		OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT,
		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
	{"XY_MONO_PAT_FIXED_BLT", OP_XY_MONO_PAT_FIXED_BLT, F_LEN_VAR, R_BCS,
		D_ALL, ADDR_FIX_1(4), 8, NULL},
	{"XY_MONO_SRC_COPY_IMMEDIATE_BLT", OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT,
		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
	{"XY_PAT_BLT_IMMEDIATE", OP_XY_PAT_BLT_IMMEDIATE, F_LEN_VAR, R_BCS,
		D_ALL, ADDR_FIX_1(4), 8, NULL},
	{"XY_SRC_COPY_CHROMA_BLT", OP_XY_SRC_COPY_CHROMA_BLT, F_LEN_VAR, R_BCS,
		D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
	{"XY_FULL_IMMEDIATE_PATTERN_BLT", OP_XY_FULL_IMMEDIATE_PATTERN_BLT,
		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
	{"XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT",
		OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT,
		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
	{"XY_PAT_CHROMA_BLT", OP_XY_PAT_CHROMA_BLT, F_LEN_VAR, R_BCS, D_ALL,
		ADDR_FIX_2(4, 5), 8, NULL},
	{"XY_PAT_CHROMA_BLT_IMMEDIATE", OP_XY_PAT_CHROMA_BLT_IMMEDIATE,
		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
	{"3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP",
		OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_VIEWPORT_STATE_POINTERS_CC",
		OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_BLEND_STATE_POINTERS",
		OP_3DSTATE_BLEND_STATE_POINTERS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_DEPTH_STENCIL_STATE_POINTERS",
		OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_BINDING_TABLE_POINTERS_VS",
		OP_3DSTATE_BINDING_TABLE_POINTERS_VS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_BINDING_TABLE_POINTERS_HS",
		OP_3DSTATE_BINDING_TABLE_POINTERS_HS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_BINDING_TABLE_POINTERS_DS",
		OP_3DSTATE_BINDING_TABLE_POINTERS_DS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_BINDING_TABLE_POINTERS_GS",
		OP_3DSTATE_BINDING_TABLE_POINTERS_GS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_BINDING_TABLE_POINTERS_PS",
		OP_3DSTATE_BINDING_TABLE_POINTERS_PS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_SAMPLER_STATE_POINTERS_VS",
		OP_3DSTATE_SAMPLER_STATE_POINTERS_VS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_SAMPLER_STATE_POINTERS_HS",
		OP_3DSTATE_SAMPLER_STATE_POINTERS_HS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_SAMPLER_STATE_POINTERS_DS",
		OP_3DSTATE_SAMPLER_STATE_POINTERS_DS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_SAMPLER_STATE_POINTERS_GS",
		OP_3DSTATE_SAMPLER_STATE_POINTERS_GS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_SAMPLER_STATE_POINTERS_PS",
		OP_3DSTATE_SAMPLER_STATE_POINTERS_PS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_URB_VS", OP_3DSTATE_URB_VS, F_LEN_VAR, R_RCS, D_ALL,
		0, 8, NULL},
	{"3DSTATE_URB_HS", OP_3DSTATE_URB_HS, F_LEN_VAR, R_RCS, D_ALL,
		0, 8, NULL},
	{"3DSTATE_URB_DS", OP_3DSTATE_URB_DS, F_LEN_VAR, R_RCS, D_ALL,
		0, 8, NULL},
	{"3DSTATE_URB_GS", OP_3DSTATE_URB_GS, F_LEN_VAR, R_RCS, D_ALL,
		0, 8, NULL},
	{"3DSTATE_GATHER_CONSTANT_VS", OP_3DSTATE_GATHER_CONSTANT_VS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_GATHER_CONSTANT_GS", OP_3DSTATE_GATHER_CONSTANT_GS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_GATHER_CONSTANT_HS", OP_3DSTATE_GATHER_CONSTANT_HS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_GATHER_CONSTANT_DS", OP_3DSTATE_GATHER_CONSTANT_DS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_GATHER_CONSTANT_PS", OP_3DSTATE_GATHER_CONSTANT_PS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_DX9_CONSTANTF_VS", OP_3DSTATE_DX9_CONSTANTF_VS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
	{"3DSTATE_DX9_CONSTANTF_PS", OP_3DSTATE_DX9_CONSTANTF_PS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
	{"3DSTATE_DX9_CONSTANTI_VS", OP_3DSTATE_DX9_CONSTANTI_VS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_DX9_CONSTANTI_PS", OP_3DSTATE_DX9_CONSTANTI_PS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_DX9_CONSTANTB_VS", OP_3DSTATE_DX9_CONSTANTB_VS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_DX9_CONSTANTB_PS", OP_3DSTATE_DX9_CONSTANTB_PS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_DX9_LOCAL_VALID_VS", OP_3DSTATE_DX9_LOCAL_VALID_VS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_DX9_LOCAL_VALID_PS", OP_3DSTATE_DX9_LOCAL_VALID_PS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_DX9_GENERATE_ACTIVE_VS", OP_3DSTATE_DX9_GENERATE_ACTIVE_VS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_DX9_GENERATE_ACTIVE_PS", OP_3DSTATE_DX9_GENERATE_ACTIVE_PS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_BINDING_TABLE_EDIT_VS", OP_3DSTATE_BINDING_TABLE_EDIT_VS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
	{"3DSTATE_BINDING_TABLE_EDIT_GS", OP_3DSTATE_BINDING_TABLE_EDIT_GS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
	{"3DSTATE_BINDING_TABLE_EDIT_HS", OP_3DSTATE_BINDING_TABLE_EDIT_HS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
	{"3DSTATE_BINDING_TABLE_EDIT_DS", OP_3DSTATE_BINDING_TABLE_EDIT_DS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
	{"3DSTATE_BINDING_TABLE_EDIT_PS", OP_3DSTATE_BINDING_TABLE_EDIT_PS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
	{"3DSTATE_VF_INSTANCING", OP_3DSTATE_VF_INSTANCING, F_LEN_VAR, R_RCS,
		D_BDW_PLUS, 0, 8, NULL},
	{"3DSTATE_VF_SGVS", OP_3DSTATE_VF_SGVS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
		0, 8, NULL},
	{"3DSTATE_VF_TOPOLOGY", OP_3DSTATE_VF_TOPOLOGY, F_LEN_VAR, R_RCS,
		D_BDW_PLUS, 0, 8, NULL},
	{"3DSTATE_WM_CHROMAKEY", OP_3DSTATE_WM_CHROMAKEY, F_LEN_VAR, R_RCS,
		D_BDW_PLUS, 0, 8, NULL},
	{"3DSTATE_PS_BLEND", OP_3DSTATE_PS_BLEND, F_LEN_VAR, R_RCS, D_BDW_PLUS,
		0, 8, NULL},
	{"3DSTATE_WM_DEPTH_STENCIL", OP_3DSTATE_WM_DEPTH_STENCIL, F_LEN_VAR,
		R_RCS, D_BDW_PLUS, 0, 8, NULL},
	{"3DSTATE_PS_EXTRA", OP_3DSTATE_PS_EXTRA, F_LEN_VAR, R_RCS, D_BDW_PLUS,
		0, 8, NULL},
	{"3DSTATE_RASTER", OP_3DSTATE_RASTER, F_LEN_VAR, R_RCS, D_BDW_PLUS,
		0, 8, NULL},
	{"3DSTATE_SBE_SWIZ", OP_3DSTATE_SBE_SWIZ, F_LEN_VAR, R_RCS, D_BDW_PLUS,
		0, 8, NULL},
	{"3DSTATE_WM_HZ_OP", OP_3DSTATE_WM_HZ_OP, F_LEN_VAR, R_RCS, D_BDW_PLUS,
		0, 8, NULL},
	{"3DSTATE_VERTEX_BUFFERS", OP_3DSTATE_VERTEX_BUFFERS, F_LEN_VAR, R_RCS,
		D_BDW_PLUS, 0, 8, NULL},
	{"3DSTATE_VERTEX_ELEMENTS", OP_3DSTATE_VERTEX_ELEMENTS, F_LEN_VAR,
		R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_INDEX_BUFFER", OP_3DSTATE_INDEX_BUFFER, F_LEN_VAR, R_RCS,
		D_BDW_PLUS, ADDR_FIX_1(2), 8, NULL},
	{"3DSTATE_VF_STATISTICS", OP_3DSTATE_VF_STATISTICS, F_LEN_CONST,
		R_RCS, D_ALL, 0, 1, NULL},
	{"3DSTATE_VF", OP_3DSTATE_VF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_CC_STATE_POINTERS", OP_3DSTATE_CC_STATE_POINTERS, F_LEN_VAR,
		R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_SCISSOR_STATE_POINTERS", OP_3DSTATE_SCISSOR_STATE_POINTERS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_GS", OP_3DSTATE_GS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_CLIP", OP_3DSTATE_CLIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_WM", OP_3DSTATE_WM, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_CONSTANT_GS", OP_3DSTATE_CONSTANT_GS, F_LEN_VAR, R_RCS,
		D_BDW_PLUS, 0, 8, NULL},
	{"3DSTATE_CONSTANT_PS", OP_3DSTATE_CONSTANT_PS, F_LEN_VAR, R_RCS,
		D_BDW_PLUS, 0, 8, NULL},
	{"3DSTATE_SAMPLE_MASK", OP_3DSTATE_SAMPLE_MASK, F_LEN_VAR, R_RCS,
		D_ALL, 0, 8, NULL},
	{"3DSTATE_CONSTANT_HS", OP_3DSTATE_CONSTANT_HS, F_LEN_VAR, R_RCS,
		D_BDW_PLUS, 0, 8, NULL},
	{"3DSTATE_CONSTANT_DS", OP_3DSTATE_CONSTANT_DS, F_LEN_VAR, R_RCS,
		D_BDW_PLUS, 0, 8, NULL},
	{"3DSTATE_HS", OP_3DSTATE_HS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_TE", OP_3DSTATE_TE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_DS", OP_3DSTATE_DS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_STREAMOUT", OP_3DSTATE_STREAMOUT, F_LEN_VAR, R_RCS,
		D_ALL, 0, 8, NULL},
	{"3DSTATE_SBE", OP_3DSTATE_SBE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_PS", OP_3DSTATE_PS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_DRAWING_RECTANGLE", OP_3DSTATE_DRAWING_RECTANGLE, F_LEN_VAR,
		R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_SAMPLER_PALETTE_LOAD0", OP_3DSTATE_SAMPLER_PALETTE_LOAD0,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_CHROMA_KEY", OP_3DSTATE_CHROMA_KEY, F_LEN_VAR, R_RCS, D_ALL,
		0, 8, NULL},
	{"3DSTATE_DEPTH_BUFFER", OP_3DSTATE_DEPTH_BUFFER, F_LEN_VAR, R_RCS,
		D_ALL, ADDR_FIX_1(2), 8, NULL},
	{"3DSTATE_POLY_STIPPLE_OFFSET", OP_3DSTATE_POLY_STIPPLE_OFFSET,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_POLY_STIPPLE_PATTERN", OP_3DSTATE_POLY_STIPPLE_PATTERN,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_LINE_STIPPLE", OP_3DSTATE_LINE_STIPPLE, F_LEN_VAR, R_RCS,
		D_ALL, 0, 8, NULL},
	{"3DSTATE_AA_LINE_PARAMS", OP_3DSTATE_AA_LINE_PARAMS, F_LEN_VAR, R_RCS,
		D_ALL, 0, 8, NULL},
	{"3DSTATE_GS_SVB_INDEX", OP_3DSTATE_GS_SVB_INDEX, F_LEN_VAR, R_RCS,
		D_ALL, 0, 8, NULL},
	{"3DSTATE_SAMPLER_PALETTE_LOAD1", OP_3DSTATE_SAMPLER_PALETTE_LOAD1,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_MULTISAMPLE", OP_3DSTATE_MULTISAMPLE_BDW, F_LEN_VAR, R_RCS,
		D_BDW_PLUS, 0, 8, NULL},
	{"3DSTATE_STENCIL_BUFFER", OP_3DSTATE_STENCIL_BUFFER, F_LEN_VAR, R_RCS,
		D_ALL, ADDR_FIX_1(2), 8, NULL},
	{"3DSTATE_HIER_DEPTH_BUFFER", OP_3DSTATE_HIER_DEPTH_BUFFER, F_LEN_VAR,
		R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL},
	{"3DSTATE_CLEAR_PARAMS", OP_3DSTATE_CLEAR_PARAMS, F_LEN_VAR,
		R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_PUSH_CONSTANT_ALLOC_VS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_PUSH_CONSTANT_ALLOC_HS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_PUSH_CONSTANT_ALLOC_DS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_PUSH_CONSTANT_ALLOC_GS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_PUSH_CONSTANT_ALLOC_PS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_MONOFILTER_SIZE", OP_3DSTATE_MONOFILTER_SIZE, F_LEN_VAR,
		R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_SO_DECL_LIST", OP_3DSTATE_SO_DECL_LIST, F_LEN_VAR, R_RCS,
		D_ALL, 0, 9, NULL},
	{"3DSTATE_SO_BUFFER", OP_3DSTATE_SO_BUFFER, F_LEN_VAR, R_RCS, D_BDW_PLUS,
		ADDR_FIX_2(2, 4), 8, NULL},
	{"3DSTATE_BINDING_TABLE_POOL_ALLOC",
		OP_3DSTATE_BINDING_TABLE_POOL_ALLOC,
		F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
	{"3DSTATE_GATHER_POOL_ALLOC", OP_3DSTATE_GATHER_POOL_ALLOC,
		F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
	{"3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC",
		OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC,
		F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
	{"3DSTATE_SAMPLE_PATTERN", OP_3DSTATE_SAMPLE_PATTERN, F_LEN_VAR, R_RCS,
		D_BDW_PLUS, 0, 8, NULL},
	{"PIPE_CONTROL", OP_PIPE_CONTROL, F_LEN_VAR, R_RCS, D_ALL,
		ADDR_FIX_1(2), 8, cmd_handler_pipe_control},
	{"3DPRIMITIVE", OP_3DPRIMITIVE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"PIPELINE_SELECT", OP_PIPELINE_SELECT, F_LEN_CONST, R_RCS, D_ALL, 0,
		1, NULL},
	{"STATE_PREFETCH", OP_STATE_PREFETCH, F_LEN_VAR, R_RCS, D_ALL,
		ADDR_FIX_1(1), 8, NULL},
	{"STATE_SIP", OP_STATE_SIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"STATE_BASE_ADDRESS", OP_STATE_BASE_ADDRESS, F_LEN_VAR, R_RCS,
		D_BDW_PLUS, ADDR_FIX_5(1, 3, 4, 5, 6), 8, NULL},
	{"OP_3D_MEDIA_0_1_4", OP_3D_MEDIA_0_1_4, F_LEN_VAR, R_RCS, D_ALL,
		ADDR_FIX_1(1), 8, NULL},
	{"3DSTATE_VS", OP_3DSTATE_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_SF", OP_3DSTATE_SF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_CONSTANT_VS", OP_3DSTATE_CONSTANT_VS, F_LEN_VAR, R_RCS,
		D_BDW_PLUS, 0, 8, NULL},
	{"3DSTATE_COMPONENT_PACKING", OP_3DSTATE_COMPONENT_PACKING, F_LEN_VAR,
		R_RCS, D_SKL_PLUS, 0, 8, NULL},
	{"MEDIA_INTERFACE_DESCRIPTOR_LOAD", OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD,
		F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
	{"MEDIA_GATEWAY_STATE", OP_MEDIA_GATEWAY_STATE, F_LEN_VAR, R_RCS, D_ALL,
		0, 16, NULL},
	{"MEDIA_STATE_FLUSH", OP_MEDIA_STATE_FLUSH, F_LEN_VAR, R_RCS, D_ALL,
		0, 16, NULL},
	{"MEDIA_OBJECT", OP_MEDIA_OBJECT, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
	{"MEDIA_CURBE_LOAD", OP_MEDIA_CURBE_LOAD, F_LEN_VAR, R_RCS, D_ALL,
		0, 16, NULL},
	{"MEDIA_OBJECT_PRT", OP_MEDIA_OBJECT_PRT, F_LEN_VAR, R_RCS, D_ALL,
		0, 16, NULL},
	{"MEDIA_OBJECT_WALKER", OP_MEDIA_OBJECT_WALKER, F_LEN_VAR, R_RCS, D_ALL,
		0, 16, NULL},
	{"GPGPU_WALKER", OP_GPGPU_WALKER, F_LEN_VAR, R_RCS, D_ALL,
		0, 8, NULL},
	{"MEDIA_VFE_STATE", OP_MEDIA_VFE_STATE, F_LEN_VAR, R_RCS, D_ALL, 0, 16,
		NULL},
	{"3DSTATE_VF_STATISTICS_GM45", OP_3DSTATE_VF_STATISTICS_GM45,
		F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
	{"MFX_PIPE_MODE_SELECT", OP_MFX_PIPE_MODE_SELECT, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_SURFACE_STATE", OP_MFX_SURFACE_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_PIPE_BUF_ADDR_STATE", OP_MFX_PIPE_BUF_ADDR_STATE, F_LEN_VAR,
		R_VCS, D_BDW_PLUS, 0, 12, NULL},
	{"MFX_IND_OBJ_BASE_ADDR_STATE", OP_MFX_IND_OBJ_BASE_ADDR_STATE,
		F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
	{"MFX_BSP_BUF_BASE_ADDR_STATE", OP_MFX_BSP_BUF_BASE_ADDR_STATE,
		F_LEN_VAR, R_VCS, D_BDW_PLUS, ADDR_FIX_3(1, 3, 5), 12, NULL},
	{"OP_2_0_0_5", OP_2_0_0_5, F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
	{"MFX_STATE_POINTER", OP_MFX_STATE_POINTER, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_QM_STATE", OP_MFX_QM_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_FQM_STATE", OP_MFX_FQM_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_PAK_INSERT_OBJECT", OP_MFX_PAK_INSERT_OBJECT, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_STITCH_OBJECT", OP_MFX_STITCH_OBJECT, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFD_IT_OBJECT", OP_MFD_IT_OBJECT, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_WAIT", OP_MFX_WAIT, F_LEN_VAR,
		R_VCS, D_ALL, 0, 6, NULL},
	{"MFX_AVC_IMG_STATE", OP_MFX_AVC_IMG_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_AVC_QM_STATE", OP_MFX_AVC_QM_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_AVC_DIRECTMODE_STATE", OP_MFX_AVC_DIRECTMODE_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_AVC_SLICE_STATE", OP_MFX_AVC_SLICE_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_AVC_REF_IDX_STATE", OP_MFX_AVC_REF_IDX_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_AVC_WEIGHTOFFSET_STATE", OP_MFX_AVC_WEIGHTOFFSET_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFD_AVC_PICID_STATE", OP_MFD_AVC_PICID_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFD_AVC_DPB_STATE", OP_MFD_AVC_DPB_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFD_AVC_BSD_OBJECT", OP_MFD_AVC_BSD_OBJECT, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFD_AVC_SLICEADDR", OP_MFD_AVC_SLICEADDR, F_LEN_VAR,
		R_VCS, D_ALL, ADDR_FIX_1(2), 12, NULL},
	{"MFC_AVC_PAK_OBJECT", OP_MFC_AVC_PAK_OBJECT, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_VC1_PRED_PIPE_STATE", OP_MFX_VC1_PRED_PIPE_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_VC1_DIRECTMODE_STATE", OP_MFX_VC1_DIRECTMODE_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFD_VC1_SHORT_PIC_STATE", OP_MFD_VC1_SHORT_PIC_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFD_VC1_LONG_PIC_STATE", OP_MFD_VC1_LONG_PIC_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFD_VC1_BSD_OBJECT", OP_MFD_VC1_BSD_OBJECT, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFC_MPEG2_SLICEGROUP_STATE", OP_MFC_MPEG2_SLICEGROUP_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFC_MPEG2_PAK_OBJECT", OP_MFC_MPEG2_PAK_OBJECT, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_MPEG2_PIC_STATE", OP_MFX_MPEG2_PIC_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_MPEG2_QM_STATE", OP_MFX_MPEG2_QM_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFD_MPEG2_BSD_OBJECT", OP_MFD_MPEG2_BSD_OBJECT, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_2_6_0_0", OP_MFX_2_6_0_0, F_LEN_VAR, R_VCS, D_ALL,
		0, 16, NULL},
	{"MFX_2_6_0_9", OP_MFX_2_6_0_9, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
	{"MFX_2_6_0_8", OP_MFX_2_6_0_8, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
	{"MFX_JPEG_PIC_STATE", OP_MFX_JPEG_PIC_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_JPEG_HUFF_TABLE_STATE", OP_MFX_JPEG_HUFF_TABLE_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFD_JPEG_BSD_OBJECT", OP_MFD_JPEG_BSD_OBJECT, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"VEBOX_STATE", OP_VEB_STATE, F_LEN_VAR, R_VECS, D_ALL, 0, 12, NULL},
	{"VEBOX_SURFACE_STATE", OP_VEB_SURFACE_STATE, F_LEN_VAR, R_VECS, D_ALL,
		0, 12, NULL},
	{"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE, F_LEN_VAR, R_VECS, D_BDW_PLUS,
		0, 20, NULL},
};
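
/*
 * Every cmd_info entry above is registered into the gvt->cmd_table hash
 * at init time, keyed by opcode, so the parser can look commands up
 * quickly while scanning.
 */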
static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
{
	hash_add(gvt->cmd_table, &e->hlist, e->info->opcode);
}

#define GVT_MAX_CMD_LENGTH	20  /* In Dword */
static void trace_cs_command(struct parser_exec_state *s,
		cycles_t cost_pre_cmd_handler, cycles_t cost_cmd_handler)
{
	/* This buffer is used by ftrace to store all commands copied from
	 * guest gma space. Sometimes commands can cross pages, which should
	 * not be handled in ftrace logic. So this is just used as a
	 * 'bounce buffer'.
	 */
	u32 cmd_trace_buf[GVT_MAX_CMD_LENGTH];
	int i;
	u32 cmd_len = cmd_length(s);

	/* The chosen value of GVT_MAX_CMD_LENGTH is based on the following
	 * two considerations:
	 * 1) From observation, most common ring commands are not that long.
	 *    But there are exceptions, so it indeed makes sense to observe
	 *    longer commands.
	 * 2) From the performance and debugging point of view, dumping the
	 *    full contents of every command is not necessary.
	 * We might shrink GVT_MAX_CMD_LENGTH or remove this trace event in
	 * the future for performance considerations.
	 */
	if (unlikely(cmd_len > GVT_MAX_CMD_LENGTH)) {
		gvt_dbg_cmd("cmd length exceeds tracing limitation!\n");
		cmd_len = GVT_MAX_CMD_LENGTH;
	}

	for (i = 0; i < cmd_len; i++)
		cmd_trace_buf[i] = cmd_val(s, i);

	trace_gvt_command(s->vgpu->id, s->ring_id, s->ip_gma, cmd_trace_buf,
			cmd_len, s->buf_type == RING_BUFFER_INSTRUCTION,
			cost_pre_cmd_handler, cost_cmd_handler);
}
/* call the cmd handler, and advance ip */
static int cmd_parser_exec(struct parser_exec_state *s)
{
	struct cmd_info *info;
	u32 cmd;
	int ret = 0;
	cycles_t t0, t1, t2;
	struct parser_exec_state s_before_advance_custom;
	struct intel_vgpu *vgpu = s->vgpu;

	t0 = get_cycles();

	cmd = cmd_val(s, 0);

	info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
	if (info == NULL) {
		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
				cmd, get_opcode(cmd, s->ring_id));
		return -EINVAL;
	}

	gvt_dbg_cmd("%s\n", info->name);

	s->info = info;

	t1 = get_cycles();

	s_before_advance_custom = *s;

	if (info->handler) {
		ret = info->handler(s);
		if (ret < 0) {
			gvt_vgpu_err("%s handler error\n", info->name);
			return ret;
		}
	}

	t2 = get_cycles();

	trace_cs_command(&s_before_advance_custom, t1 - t0, t2 - t1);

	if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
		ret = cmd_advance_default(s);
		if (ret) {
			gvt_vgpu_err("%s IP advance error\n", info->name);
			return ret;
		}
	}
	return 0;
}
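
/*
 * A ring buffer is circular, so the valid region between head and tail
 * may wrap past the top of the ring. For example, with head = 0x100 and
 * tail = 0x80, the *invalid* hole is (0x80, 0x100); everything else in
 * the ring is in flight.
 */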
static inline bool gma_out_of_range(unsigned long gma,
		unsigned long gma_head, unsigned long gma_tail)
{
	if (gma_tail >= gma_head)
		return (gma < gma_head) || (gma > gma_tail);
	else
		return (gma > gma_tail) && (gma < gma_head);
}
static int command_scan(struct parser_exec_state *s,
		unsigned long rb_head, unsigned long rb_tail,
		unsigned long rb_start, unsigned long rb_len)
{
	unsigned long gma_head, gma_tail, gma_bottom;
	int ret = 0;
	struct intel_vgpu *vgpu = s->vgpu;

	gma_head = rb_start + rb_head;
	gma_tail = rb_start + rb_tail;
	gma_bottom = rb_start + rb_len;

	gvt_dbg_cmd("scan_start: start=%lx end=%lx\n", gma_head, gma_tail);

	while (s->ip_gma != gma_tail) {
		if (s->buf_type == RING_BUFFER_INSTRUCTION) {
			if (s->ip_gma < rb_start ||
			    s->ip_gma >= gma_bottom) {
				gvt_vgpu_err("ip_gma %lx out of ring scope."
					"(base:0x%lx, bottom: 0x%lx)\n",
					s->ip_gma, rb_start,
					gma_bottom);
				parser_exec_state_dump(s);
				return -EINVAL;
			}
			if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
				gvt_vgpu_err("ip_gma %lx out of range."
					"base 0x%lx head 0x%lx tail 0x%lx\n",
					s->ip_gma, rb_start,
					rb_head, rb_tail);
				parser_exec_state_dump(s);
				break;
			}
		}
		ret = cmd_parser_exec(s);
		if (ret) {
			gvt_vgpu_err("cmd parser error\n");
			parser_exec_state_dump(s);
			break;
		}
	}

	gvt_dbg_cmd("scan_end\n");
	return ret;
}
static int scan_workload(struct intel_vgpu_workload *workload)
{
	unsigned long gma_head, gma_tail;
	struct parser_exec_state s;
	int ret = 0;

	/* ring base is page aligned */
	if (WARN_ON(!IS_ALIGNED(workload->rb_start, GTT_PAGE_SIZE)))
		return -EINVAL;

	gma_head = workload->rb_start + workload->rb_head;
	gma_tail = workload->rb_start + workload->rb_tail;

	s.buf_type = RING_BUFFER_INSTRUCTION;
	s.buf_addr_type = GTT_BUFFER;
	s.vgpu = workload->vgpu;
	s.ring_id = workload->ring_id;
	s.ring_start = workload->rb_start;
	s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
	s.ring_head = gma_head;
	s.ring_tail = gma_tail;
	s.rb_va = workload->shadow_ring_buffer_va;
	s.workload = workload;

	if ((bypass_scan_mask & (1 << workload->ring_id)) ||
		gma_head == gma_tail)
		return 0;

	ret = ip_gma_set(&s, gma_head);
	if (ret)
		goto out;

	ret = command_scan(&s, workload->rb_head, workload->rb_tail,
		workload->rb_start, _RING_CTL_BUF_SIZE(workload->rb_ctl));

out:
	return ret;
}
static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	unsigned long gma_head, gma_tail, ring_size, ring_tail;
	struct parser_exec_state s;
	int ret = 0;
	struct intel_vgpu_workload *workload = container_of(wa_ctx,
				struct intel_vgpu_workload,
				wa_ctx);

	/* ring base is page aligned */
	if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, GTT_PAGE_SIZE)))
		return -EINVAL;

	ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(uint32_t);
	ring_size = round_up(wa_ctx->indirect_ctx.size + CACHELINE_BYTES,
			PAGE_SIZE);
	gma_head = wa_ctx->indirect_ctx.guest_gma;
	gma_tail = wa_ctx->indirect_ctx.guest_gma + ring_tail;

	s.buf_type = RING_BUFFER_INSTRUCTION;
	s.buf_addr_type = GTT_BUFFER;
	s.vgpu = workload->vgpu;
	s.ring_id = workload->ring_id;
	s.ring_start = wa_ctx->indirect_ctx.guest_gma;
	s.ring_size = ring_size;
	s.ring_head = gma_head;
	s.ring_tail = gma_tail;
	s.rb_va = wa_ctx->indirect_ctx.shadow_va;
	s.workload = workload;

	ret = ip_gma_set(&s, gma_head);
	if (ret)
		goto out;

	ret = command_scan(&s, 0, ring_tail,
		wa_ctx->indirect_ctx.guest_gma, ring_size);

out:
	return ret;
}
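
/*
 * Copy the guest ring buffer contents into the shadow ring buffer that
 * will actually be submitted. If the guest's [head, tail) region wraps
 * past the top of the ring, the copy is done in two pieces: head to top,
 * then ring start to tail.
 */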
static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
	u32 *cs;
	int ret;

	guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);

	/* calculate workload ring buffer size */
	workload->rb_len = (workload->rb_tail + guest_rb_size -
			workload->rb_head) % guest_rb_size;

	gma_head = workload->rb_start + workload->rb_head;
	gma_tail = workload->rb_start + workload->rb_tail;
	gma_top = workload->rb_start + guest_rb_size;

	/* allocate shadow ring buffer */
	cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* get shadow ring buffer va */
	workload->shadow_ring_buffer_va = cs;

	/* head > tail --> copy head <-> top */
	if (gma_head > gma_tail) {
		ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
				      gma_head, gma_top, cs);
		if (ret < 0) {
			gvt_vgpu_err("fail to copy guest ring buffer\n");
			return ret;
		}
		cs += ret / sizeof(u32);
		gma_head = workload->rb_start;
	}

	/* copy head or start <-> tail */
	ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail, cs);
	if (ret < 0) {
		gvt_vgpu_err("fail to copy guest ring buffer\n");
		return ret;
	}
	cs += ret / sizeof(u32);
	intel_ring_advance(workload->req, cs);
	return 0;
}
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
	int ret;
	struct intel_vgpu *vgpu = workload->vgpu;

	ret = shadow_workload_ring_buffer(workload);
	if (ret) {
		gvt_vgpu_err("fail to shadow workload ring_buffer\n");
		return ret;
	}

	ret = scan_workload(workload);
	if (ret) {
		gvt_vgpu_err("scan workload error\n");
		return ret;
	}
	return 0;
}
static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	int ctx_size = wa_ctx->indirect_ctx.size;
	unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
	struct intel_vgpu_workload *workload = container_of(wa_ctx,
					struct intel_vgpu_workload,
					wa_ctx);
	struct intel_vgpu *vgpu = workload->vgpu;
	struct drm_i915_gem_object *obj;
	int ret = 0;
	void *map;

	obj = i915_gem_object_create(workload->vgpu->gvt->dev_priv,
				     roundup(ctx_size + CACHELINE_BYTES,
					     PAGE_SIZE));
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* get the va of the shadow batch buffer */
	map = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(map)) {
		gvt_vgpu_err("failed to vmap shadow indirect ctx\n");
		ret = PTR_ERR(map);
		goto put_obj;
	}

	ret = i915_gem_object_set_to_cpu_domain(obj, false);
	if (ret) {
		gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n");
		goto unmap_src;
	}

	ret = copy_gma_to_hva(workload->vgpu,
				workload->vgpu->gtt.ggtt_mm,
				guest_gma, guest_gma + ctx_size,
				map);
	if (ret < 0) {
		gvt_vgpu_err("fail to copy guest indirect ctx\n");
		goto unmap_src;
	}

	wa_ctx->indirect_ctx.obj = obj;
	wa_ctx->indirect_ctx.shadow_va = map;
	return 0;

unmap_src:
	i915_gem_object_unpin_map(obj);
put_obj:
	/* wa_ctx->indirect_ctx.obj is not set yet on the error paths */
	i915_gem_object_put(obj);
	return ret;
}
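
/*
 * Append an MI_BATCH_BUFFER_START command (0x18800001) right after the
 * shadow indirect context so that execution chains straight into the
 * guest's per-context workaround buffer at per_ctx.guest_gma.
 */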
static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	uint32_t per_ctx_start[CACHELINE_DWORDS] = {0};
	unsigned char *bb_start_sva;

	per_ctx_start[0] = 0x18800001;
	per_ctx_start[1] = wa_ctx->per_ctx.guest_gma;

	bb_start_sva = (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
				wa_ctx->indirect_ctx.size;

	memcpy(bb_start_sva, per_ctx_start, CACHELINE_BYTES);

	return 0;
}
int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	int ret;
	struct intel_vgpu_workload *workload = container_of(wa_ctx,
					struct intel_vgpu_workload,
					wa_ctx);
	struct intel_vgpu *vgpu = workload->vgpu;

	if (wa_ctx->indirect_ctx.size == 0)
		return 0;

	ret = shadow_indirect_ctx(wa_ctx);
	if (ret) {
		gvt_vgpu_err("fail to shadow indirect ctx\n");
		return ret;
	}

	combine_wa_ctx(wa_ctx);

	ret = scan_wa_ctx(wa_ctx);
	if (ret) {
		gvt_vgpu_err("scan wa ctx error\n");
		return ret;
	}
	return 0;
}
static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
		unsigned int opcode, int rings)
{
	struct cmd_info *info = NULL;
	unsigned int ring;

	for_each_set_bit(ring, (unsigned long *)&rings, I915_NUM_ENGINES) {
		info = find_cmd_entry(gvt, opcode, ring);
		if (info)
			break;
	}
	return info;
}
static int init_cmd_table(struct intel_gvt *gvt)
{
	int i;
	struct cmd_entry *e;
	struct cmd_info *info;
	unsigned int gen_type;

	gen_type = intel_gvt_get_device_type(gvt);

	for (i = 0; i < ARRAY_SIZE(cmd_info); i++) {
		if (!(cmd_info[i].devices & gen_type))
			continue;

		e = kzalloc(sizeof(*e), GFP_KERNEL);
		if (!e)
			return -ENOMEM;

		e->info = &cmd_info[i];
		info = find_cmd_entry_any_ring(gvt,
				e->info->opcode, e->info->rings);
		if (info) {
			gvt_err("%s %s duplicated\n", e->info->name,
					info->name);
			kfree(e);
			return -EEXIST;
		}

		INIT_HLIST_NODE(&e->hlist);
		add_cmd_entry(gvt, e);
		gvt_dbg_cmd("add %-30s op %04x flag %x devs %02x rings %02x\n",
				e->info->name, e->info->opcode, e->info->flag,
				e->info->devices, e->info->rings);
	}
	return 0;
}
static void clean_cmd_table(struct intel_gvt *gvt)
{
	struct hlist_node *tmp;
	struct cmd_entry *e;
	int i;

	hash_for_each_safe(gvt->cmd_table, i, tmp, e, hlist)
		kfree(e);

	hash_init(gvt->cmd_table);
}

void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt)
{
	clean_cmd_table(gvt);
}
int intel_gvt_init_cmd_parser(struct intel_gvt *gvt)
{
	int ret;

	ret = init_cmd_table(gvt);
	if (ret) {
		intel_gvt_clean_cmd_parser(gvt);
		return ret;
	}
	return 0;
}