handlers.c

/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Eddie Dong <eddie.dong@intel.com>
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Pei Zhang <pei.zhang@intel.com>
 *    Niu Bing <bing.niu@intel.com>
 *    Ping Gao <ping.a.gao@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"

/* XXX FIXME i915 has changed PP_XXX definition */
#define PCH_PP_STATUS _MMIO(0xc7200)
#define PCH_PP_CONTROL _MMIO(0xc7204)
#define PCH_PP_ON_DELAYS _MMIO(0xc7208)
#define PCH_PP_OFF_DELAYS _MMIO(0xc720c)
#define PCH_PP_DIVISOR _MMIO(0xc7210)

unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
{
	if (IS_BROADWELL(gvt->dev_priv))
		return D_BDW;
	else if (IS_SKYLAKE(gvt->dev_priv))
		return D_SKL;
	else if (IS_KABYLAKE(gvt->dev_priv))
		return D_KBL;

	return 0;
}

bool intel_gvt_match_device(struct intel_gvt *gvt,
		unsigned long device)
{
	return intel_gvt_get_device_type(gvt) & device;
}
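/*
 * Device types act as bit flags, so intel_gvt_match_device() matches by
 * bitwise AND: a handler registered with, say, (D_SKL | D_KBL) would be
 * active on either platform (illustrative combination; the D_* masks are
 * defined elsewhere).
 */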
static void read_vreg(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
}

static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
}

static struct intel_gvt_mmio_info *find_mmio_info(struct intel_gvt *gvt,
		unsigned int offset)
{
	struct intel_gvt_mmio_info *e;

	hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
		if (e->offset == offset)
			return e;
	}
	return NULL;
}

static int new_mmio_info(struct intel_gvt *gvt,
		u32 offset, u8 flags, u32 size,
		u32 addr_mask, u32 ro_mask, u32 device,
		gvt_mmio_func read, gvt_mmio_func write)
{
	struct intel_gvt_mmio_info *info, *p;
	u32 start, end, i;

	if (!intel_gvt_match_device(gvt, device))
		return 0;

	if (WARN_ON(!IS_ALIGNED(offset, 4)))
		return -EINVAL;

	start = offset;
	end = offset + size;

	for (i = start; i < end; i += 4) {
		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		info->offset = i;
		p = find_mmio_info(gvt, info->offset);
		if (p) {
			WARN(1, "dup mmio definition offset %x\n",
				info->offset);
			kfree(info);

			/* Return -EEXIST here to make GVT-g load fail, so
			 * that duplicated MMIO definitions are caught as
			 * early as possible.
			 */
			return -EEXIST;
		}

		info->ro_mask = ro_mask;
		info->device = device;
		info->read = read ? read : intel_vgpu_default_mmio_read;
		info->write = write ? write : intel_vgpu_default_mmio_write;
		gvt->mmio.mmio_attribute[info->offset / 4] = flags;
		INIT_HLIST_NODE(&info->node);
		hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
		gvt->mmio.num_tracked_mmio++;
	}
	return 0;
}
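/*
 * new_mmio_info() registers one tracked entry per 4-byte dword in
 * [offset, offset + size), all sharing the same flags and handlers; NULL
 * read/write callbacks fall back to the defaults. Callers normally go
 * through the MMIO_* wrapper macros defined near the end of this file
 * rather than calling it directly.
 */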
static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg)
{
	enum intel_engine_id id;
	struct intel_engine_cs *engine;

	reg &= ~GENMASK(11, 0);
	for_each_engine(engine, gvt->dev_priv, id) {
		if (engine->mmio_base == reg)
			return id;
	}
	return -1;
}
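/*
 * Each engine's registers live in a 4KB block starting at
 * engine->mmio_base, so masking off the low 12 bits (GENMASK(11, 0)) maps
 * any register in the block back to its base and thus to the owning ring.
 * For example, an ELSP write at (base + 0x230) resolves to the engine at
 * that base.
 */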
#define offset_to_fence_num(offset) \
	((offset - i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0))) >> 3)

#define fence_num_to_offset(num) \
	(num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0)))

static void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
{
	switch (reason) {
	case GVT_FAILSAFE_UNSUPPORTED_GUEST:
		pr_err("Detected that your guest driver doesn't support GVT-g.\n");
		break;
	case GVT_FAILSAFE_INSUFFICIENT_RESOURCE:
		pr_err("Graphics resources are not enough for the guest\n");
		/* fall through */
	default:
		break;
	}
	pr_err("Now vgpu %d will enter failsafe mode.\n", vgpu->id);
	vgpu->failsafe = true;
}

static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
		unsigned int fence_num, void *p_data, unsigned int bytes)
{
	if (fence_num >= vgpu_fence_sz(vgpu)) {
		/* If a guest accesses out-of-bound fence regs without
		 * accessing pv_info first, treat the guest as not
		 * supporting GVT-g and let the vGPU enter failsafe mode.
		 */
		if (!vgpu->pv_notified)
			enter_failsafe_mode(vgpu,
					GVT_FAILSAFE_UNSUPPORTED_GUEST);
		if (!vgpu->mmio.disable_warn_untrack) {
			gvt_vgpu_err("found oob fence register access\n");
			gvt_vgpu_err("total fence %d, access fence %d\n",
					vgpu_fence_sz(vgpu), fence_num);
		}
		memset(p_data, 0, bytes);
		return -EINVAL;
	}
	return 0;
}

static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
		void *p_data, unsigned int bytes)
{
	int ret;

	ret = sanitize_fence_mmio_access(vgpu, offset_to_fence_num(off),
			p_data, bytes);
	if (ret)
		return ret;
	read_vreg(vgpu, off, p_data, bytes);
	return 0;
}

static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
		void *p_data, unsigned int bytes)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	unsigned int fence_num = offset_to_fence_num(off);
	int ret;

	ret = sanitize_fence_mmio_access(vgpu, fence_num, p_data, bytes);
	if (ret)
		return ret;
	write_vreg(vgpu, off, p_data, bytes);

	mmio_hw_access_pre(dev_priv);
	intel_vgpu_write_fence(vgpu, fence_num,
			vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
	mmio_hw_access_post(dev_priv);
	return 0;
}

#define CALC_MODE_MASK_REG(old, new) \
	(((new) & GENMASK(31, 16)) \
	 | ((((old) & GENMASK(15, 0)) & ~((new) >> 16)) \
	 | ((new) & ((new) >> 16))))
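/*
 * Masked-bit registers keep a write-enable mask in bits 31:16 and values
 * in bits 15:0; CALC_MODE_MASK_REG() updates only the value bits whose
 * mask bit is set. Worked example: old = 0x0000ffff, new = 0x00010000
 * (mask bit 0 set, value bit 0 clear) yields 0x0001fffe, i.e. only bit 0
 * of the old value is cleared.
 */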
static int mul_force_wake_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 old, new;
	uint32_t ack_reg_offset;

	old = vgpu_vreg(vgpu, offset);
	new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);

	if (IS_SKYLAKE(vgpu->gvt->dev_priv)
		|| IS_KABYLAKE(vgpu->gvt->dev_priv)) {
		switch (offset) {
		case FORCEWAKE_RENDER_GEN9_REG:
			ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
			break;
		case FORCEWAKE_BLITTER_GEN9_REG:
			ack_reg_offset = FORCEWAKE_ACK_BLITTER_GEN9_REG;
			break;
		case FORCEWAKE_MEDIA_GEN9_REG:
			ack_reg_offset = FORCEWAKE_ACK_MEDIA_GEN9_REG;
			break;
		default:
			/* should not hit here */
			gvt_vgpu_err("invalid forcewake offset 0x%x\n", offset);
			return -EINVAL;
		}
	} else {
		ack_reg_offset = FORCEWAKE_ACK_HSW_REG;
	}

	vgpu_vreg(vgpu, offset) = new;
	vgpu_vreg(vgpu, ack_reg_offset) = (new & GENMASK(15, 0));
	return 0;
}

static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	unsigned int engine_mask = 0;
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (data & GEN6_GRDOM_FULL) {
		gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
		engine_mask = ALL_ENGINES;
	} else {
		if (data & GEN6_GRDOM_RENDER) {
			gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
			engine_mask |= (1 << RCS);
		}
		if (data & GEN6_GRDOM_MEDIA) {
			gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
			engine_mask |= (1 << VCS);
		}
		if (data & GEN6_GRDOM_BLT) {
			gvt_dbg_mmio("vgpu%d: request BCS reset\n", vgpu->id);
			engine_mask |= (1 << BCS);
		}
		if (data & GEN6_GRDOM_VECS) {
			gvt_dbg_mmio("vgpu%d: request VECS reset\n", vgpu->id);
			engine_mask |= (1 << VECS);
		}
		if (data & GEN8_GRDOM_MEDIA2) {
			gvt_dbg_mmio("vgpu%d: request VCS2 reset\n", vgpu->id);
			if (HAS_BSD2(vgpu->gvt->dev_priv))
				engine_mask |= (1 << VCS2);
		}
	}

	intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);

	/* sw will wait for the device to ack the reset request */
	vgpu_vreg(vgpu, offset) = 0;

	return 0;
}

static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	return intel_gvt_i2c_handle_gmbus_read(vgpu, offset, p_data, bytes);
}

static int gmbus_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	return intel_gvt_i2c_handle_gmbus_write(vgpu, offset, p_data, bytes);
}

static int pch_pp_control_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & PANEL_POWER_ON) {
		vgpu_vreg(vgpu, PCH_PP_STATUS) |= PP_ON;
		vgpu_vreg(vgpu, PCH_PP_STATUS) |= PP_SEQUENCE_STATE_ON_IDLE;
		vgpu_vreg(vgpu, PCH_PP_STATUS) &= ~PP_SEQUENCE_POWER_DOWN;
		vgpu_vreg(vgpu, PCH_PP_STATUS) &= ~PP_CYCLE_DELAY_ACTIVE;
	} else
		vgpu_vreg(vgpu, PCH_PP_STATUS) &=
			~(PP_ON | PP_SEQUENCE_POWER_DOWN
					| PP_CYCLE_DELAY_ACTIVE);
	return 0;
}

static int transconf_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & TRANS_ENABLE)
		vgpu_vreg(vgpu, offset) |= TRANS_STATE_ENABLE;
	else
		vgpu_vreg(vgpu, offset) &= ~TRANS_STATE_ENABLE;
	return 0;
}

static int lcpll_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & LCPLL_PLL_DISABLE)
		vgpu_vreg(vgpu, offset) &= ~LCPLL_PLL_LOCK;
	else
		vgpu_vreg(vgpu, offset) |= LCPLL_PLL_LOCK;

	if (vgpu_vreg(vgpu, offset) & LCPLL_CD_SOURCE_FCLK)
		vgpu_vreg(vgpu, offset) |= LCPLL_CD_SOURCE_FCLK_DONE;
	else
		vgpu_vreg(vgpu, offset) &= ~LCPLL_CD_SOURCE_FCLK_DONE;

	return 0;
}

static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	switch (offset) {
	case 0xe651c:
	case 0xe661c:
	case 0xe671c:
	case 0xe681c:
		vgpu_vreg(vgpu, offset) = 1 << 17;
		break;
	case 0xe6c04:
		vgpu_vreg(vgpu, offset) = 0x3;
		break;
	case 0xe6e1c:
		vgpu_vreg(vgpu, offset) = 0x2f << 16;
		break;
	default:
		return -EINVAL;
	}

	read_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (data & PIPECONF_ENABLE)
		vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE;
	else
		vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE;
	intel_gvt_check_vblank_emulation(vgpu->gvt);
	return 0;
}

/* sorted in ascending order */
static i915_reg_t force_nonpriv_white_list[] = {
	GEN9_CS_DEBUG_MODE1,		/* _MMIO(0x20ec) */
	GEN9_CTX_PREEMPT_REG,		/* _MMIO(0x2248) */
	GEN8_CS_CHICKEN1,		/* _MMIO(0x2580) */
	_MMIO(0x2690),
	_MMIO(0x2694),
	_MMIO(0x2698),
	_MMIO(0x4de0),
	_MMIO(0x4de4),
	_MMIO(0x4dfc),
	GEN7_COMMON_SLICE_CHICKEN1,	/* _MMIO(0x7010) */
	_MMIO(0x7014),
	HDC_CHICKEN0,			/* _MMIO(0x7300) */
	GEN8_HDC_CHICKEN1,		/* _MMIO(0x7304) */
	_MMIO(0x7700),
	_MMIO(0x7704),
	_MMIO(0x7708),
	_MMIO(0x770c),
	_MMIO(0xb110),
	GEN8_L3SQCREG4,			/* _MMIO(0xb118) */
	_MMIO(0xe100),
	_MMIO(0xe18c),
	_MMIO(0xe48c),
	_MMIO(0xe5f4),
};

/* a simple bsearch */
static inline bool in_whitelist(unsigned int reg)
{
	int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list);
	i915_reg_t *array = force_nonpriv_white_list;

	while (left < right) {
		int mid = (left + right)/2;

		if (reg > array[mid].reg)
			left = mid + 1;
		else if (reg < array[mid].reg)
			right = mid;
		else
			return true;
	}
	return false;
}
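/*
 * The binary search in in_whitelist() relies on force_nonpriv_white_list[]
 * staying sorted by register offset; a new entry inserted out of ascending
 * order would be silently missed by lookups.
 */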
static int force_nonpriv_write(struct intel_vgpu *vgpu,
	unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 reg_nonpriv = *(u32 *)p_data;
	int ret = -EINVAL;

	if ((bytes != 4) || ((offset & (bytes - 1)) != 0)) {
		gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n",
			vgpu->id, offset, bytes);
		return ret;
	}

	if (in_whitelist(reg_nonpriv)) {
		ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
			bytes);
	} else {
		gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x\n",
			vgpu->id, reg_nonpriv);
	}
	return ret;
}

static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & DDI_BUF_CTL_ENABLE) {
		vgpu_vreg(vgpu, offset) &= ~DDI_BUF_IS_IDLE;
	} else {
		vgpu_vreg(vgpu, offset) |= DDI_BUF_IS_IDLE;
		if (offset == i915_mmio_reg_offset(DDI_BUF_CTL(PORT_E)))
			vgpu_vreg(vgpu, DP_TP_STATUS(PORT_E))
				&= ~DP_TP_STATUS_AUTOTRAIN_DONE;
	}
	return 0;
}

static int fdi_rx_iir_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	vgpu_vreg(vgpu, offset) &= ~*(u32 *)p_data;
	return 0;
}

#define FDI_LINK_TRAIN_PATTERN1 0
#define FDI_LINK_TRAIN_PATTERN2 1

static int fdi_auto_training_started(struct intel_vgpu *vgpu)
{
	u32 ddi_buf_ctl = vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_E));
	u32 rx_ctl = vgpu_vreg(vgpu, _FDI_RXA_CTL);
	u32 tx_ctl = vgpu_vreg(vgpu, DP_TP_CTL(PORT_E));

	if ((ddi_buf_ctl & DDI_BUF_CTL_ENABLE) &&
			(rx_ctl & FDI_RX_ENABLE) &&
			(rx_ctl & FDI_AUTO_TRAINING) &&
			(tx_ctl & DP_TP_CTL_ENABLE) &&
			(tx_ctl & DP_TP_CTL_FDI_AUTOTRAIN))
		return 1;
	else
		return 0;
}

static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
		enum pipe pipe, unsigned int train_pattern)
{
	i915_reg_t fdi_rx_imr, fdi_tx_ctl, fdi_rx_ctl;
	unsigned int fdi_rx_check_bits, fdi_tx_check_bits;
	unsigned int fdi_rx_train_bits, fdi_tx_train_bits;
	unsigned int fdi_iir_check_bits;

	fdi_rx_imr = FDI_RX_IMR(pipe);
	fdi_tx_ctl = FDI_TX_CTL(pipe);
	fdi_rx_ctl = FDI_RX_CTL(pipe);

	if (train_pattern == FDI_LINK_TRAIN_PATTERN1) {
		fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_1_CPT;
		fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_1;
		fdi_iir_check_bits = FDI_RX_BIT_LOCK;
	} else if (train_pattern == FDI_LINK_TRAIN_PATTERN2) {
		fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_2_CPT;
		fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
		fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK;
	} else {
		gvt_vgpu_err("Invalid train pattern %d\n", train_pattern);
		return -EINVAL;
	}

	fdi_rx_check_bits = FDI_RX_ENABLE | fdi_rx_train_bits;
	fdi_tx_check_bits = FDI_TX_ENABLE | fdi_tx_train_bits;

	/* If imr bit has been masked */
	if (vgpu_vreg(vgpu, fdi_rx_imr) & fdi_iir_check_bits)
		return 0;

	if (((vgpu_vreg(vgpu, fdi_tx_ctl) & fdi_tx_check_bits)
			== fdi_tx_check_bits)
			&& ((vgpu_vreg(vgpu, fdi_rx_ctl) & fdi_rx_check_bits)
			== fdi_rx_check_bits))
		return 1;
	else
		return 0;
}

#define INVALID_INDEX (~0U)

static unsigned int calc_index(unsigned int offset, unsigned int start,
	unsigned int next, unsigned int end, i915_reg_t i915_end)
{
	unsigned int range = next - start;

	if (!end)
		end = i915_mmio_reg_offset(i915_end);
	if (offset < start || offset > end)
		return INVALID_INDEX;
	offset -= start;
	return offset / range;
}

#define FDI_RX_CTL_TO_PIPE(offset) \
	calc_index(offset, _FDI_RXA_CTL, _FDI_RXB_CTL, 0, FDI_RX_CTL(PIPE_C))
#define FDI_TX_CTL_TO_PIPE(offset) \
	calc_index(offset, _FDI_TXA_CTL, _FDI_TXB_CTL, 0, FDI_TX_CTL(PIPE_C))
#define FDI_RX_IMR_TO_PIPE(offset) \
	calc_index(offset, _FDI_RXA_IMR, _FDI_RXB_IMR, 0, FDI_RX_IMR(PIPE_C))
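/*
 * calc_index() converts a register offset inside a uniformly-strided bank
 * into an instance index: the stride is (next - start), and anything
 * outside [start, end] maps to INVALID_INDEX. As an illustration, with
 * _FDI_RXA_CTL at 0xf000c and _FDI_RXB_CTL at 0xf100c the stride is
 * 0x1000, so FDI_RX_CTL_TO_PIPE(0xf200c) yields index 2 (pipe C).
 */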
static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	i915_reg_t fdi_rx_iir;
	unsigned int index;
	int ret;

	if (FDI_RX_CTL_TO_PIPE(offset) != INVALID_INDEX)
		index = FDI_RX_CTL_TO_PIPE(offset);
	else if (FDI_TX_CTL_TO_PIPE(offset) != INVALID_INDEX)
		index = FDI_TX_CTL_TO_PIPE(offset);
	else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
		index = FDI_RX_IMR_TO_PIPE(offset);
	else {
		gvt_vgpu_err("Unsupported register %x\n", offset);
		return -EINVAL;
	}

	write_vreg(vgpu, offset, p_data, bytes);

	fdi_rx_iir = FDI_RX_IIR(index);

	ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN1);
	if (ret < 0)
		return ret;
	if (ret)
		vgpu_vreg(vgpu, fdi_rx_iir) |= FDI_RX_BIT_LOCK;

	ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN2);
	if (ret < 0)
		return ret;
	if (ret)
		vgpu_vreg(vgpu, fdi_rx_iir) |= FDI_RX_SYMBOL_LOCK;

	if (offset == _FDI_RXA_CTL)
		if (fdi_auto_training_started(vgpu))
			vgpu_vreg(vgpu, DP_TP_STATUS(PORT_E)) |=
				DP_TP_STATUS_AUTOTRAIN_DONE;
	return 0;
}

#define DP_TP_CTL_TO_PORT(offset) \
	calc_index(offset, _DP_TP_CTL_A, _DP_TP_CTL_B, 0, DP_TP_CTL(PORT_E))

static int dp_tp_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	i915_reg_t status_reg;
	unsigned int index;
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);

	index = DP_TP_CTL_TO_PORT(offset);
	data = (vgpu_vreg(vgpu, offset) & GENMASK(10, 8)) >> 8;
	if (data == 0x2) {
		status_reg = DP_TP_STATUS(index);
		vgpu_vreg(vgpu, status_reg) |= (1 << 25);
	}
	return 0;
}

static int dp_tp_status_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 reg_val;
	u32 sticky_mask;

	reg_val = *((u32 *)p_data);
	sticky_mask = GENMASK(27, 26) | (1 << 24);

	vgpu_vreg(vgpu, offset) = (reg_val & ~sticky_mask) |
		(vgpu_vreg(vgpu, offset) & sticky_mask);
	vgpu_vreg(vgpu, offset) &= ~(reg_val & sticky_mask);
	return 0;
}
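/*
 * The bits in sticky_mask are write-one-to-clear: the first assignment
 * preserves their current value no matter what the guest wrote, and the
 * second clears exactly those sticky bits the guest wrote as 1. E.g.
 * writing (1 << 24) clears bit 24 while leaving bits 27:26 untouched.
 */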
static int pch_adpa_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (data & ADPA_CRT_HOTPLUG_FORCE_TRIGGER)
		vgpu_vreg(vgpu, offset) &= ~ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
	return 0;
}

static int south_chicken2_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (data & FDI_MPHY_IOSFSB_RESET_CTL)
		vgpu_vreg(vgpu, offset) |= FDI_MPHY_IOSFSB_RESET_STATUS;
	else
		vgpu_vreg(vgpu, offset) &= ~FDI_MPHY_IOSFSB_RESET_STATUS;
	return 0;
}

#define DSPSURF_TO_PIPE(offset) \
	calc_index(offset, _DSPASURF, _DSPBSURF, 0, DSPSURF(PIPE_C))

static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	unsigned int index = DSPSURF_TO_PIPE(offset);
	i915_reg_t surflive_reg = DSPSURFLIVE(index);
	int flip_event[] = {
		[PIPE_A] = PRIMARY_A_FLIP_DONE,
		[PIPE_B] = PRIMARY_B_FLIP_DONE,
		[PIPE_C] = PRIMARY_C_FLIP_DONE,
	};

	write_vreg(vgpu, offset, p_data, bytes);
	vgpu_vreg(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);

	set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
	return 0;
}

#define SPRSURF_TO_PIPE(offset) \
	calc_index(offset, _SPRA_SURF, _SPRB_SURF, 0, SPRSURF(PIPE_C))

static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	unsigned int index = SPRSURF_TO_PIPE(offset);
	i915_reg_t surflive_reg = SPRSURFLIVE(index);
	int flip_event[] = {
		[PIPE_A] = SPRITE_A_FLIP_DONE,
		[PIPE_B] = SPRITE_B_FLIP_DONE,
		[PIPE_C] = SPRITE_C_FLIP_DONE,
	};

	write_vreg(vgpu, offset, p_data, bytes);
	vgpu_vreg(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);

	set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
	return 0;
}

static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
		unsigned int reg)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	enum intel_gvt_event_type event;

	if (reg == _DPA_AUX_CH_CTL)
		event = AUX_CHANNEL_A;
	else if (reg == _PCH_DPB_AUX_CH_CTL || reg == _DPB_AUX_CH_CTL)
		event = AUX_CHANNEL_B;
	else if (reg == _PCH_DPC_AUX_CH_CTL || reg == _DPC_AUX_CH_CTL)
		event = AUX_CHANNEL_C;
	else if (reg == _PCH_DPD_AUX_CH_CTL || reg == _DPD_AUX_CH_CTL)
		event = AUX_CHANNEL_D;
	else {
		WARN_ON(true);
		return -EINVAL;
	}

	intel_vgpu_trigger_virtual_event(vgpu, event);
	return 0;
}

static int dp_aux_ch_ctl_trans_done(struct intel_vgpu *vgpu, u32 value,
		unsigned int reg, int len, bool data_valid)
{
	/* mark transaction done */
	value |= DP_AUX_CH_CTL_DONE;
	value &= ~DP_AUX_CH_CTL_SEND_BUSY;
	value &= ~DP_AUX_CH_CTL_RECEIVE_ERROR;

	if (data_valid)
		value &= ~DP_AUX_CH_CTL_TIME_OUT_ERROR;
	else
		value |= DP_AUX_CH_CTL_TIME_OUT_ERROR;

	/* message size */
	value &= ~(0xf << 20);
	value |= (len << 20);
	vgpu_vreg(vgpu, reg) = value;

	if (value & DP_AUX_CH_CTL_INTERRUPT)
		return trigger_aux_channel_interrupt(vgpu, reg);
	return 0;
}

static void dp_aux_ch_ctl_link_training(struct intel_vgpu_dpcd_data *dpcd,
		uint8_t t)
{
	if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == DPCD_TRAINING_PATTERN_1) {
		/* training pattern 1 for CR */
		/* set LANE0_CR_DONE, LANE1_CR_DONE */
		dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_CR_DONE;
		/* set LANE2_CR_DONE, LANE3_CR_DONE */
		dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_CR_DONE;
	} else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
			DPCD_TRAINING_PATTERN_2) {
		/* training pattern 2 for EQ */
		/* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane0_1 */
		dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_EQ_DONE;
		dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_SYMBOL_LOCKED;
		/* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane2_3 */
		dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_EQ_DONE;
		dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_SYMBOL_LOCKED;
		/* set INTERLANE_ALIGN_DONE */
		dpcd->data[DPCD_LANE_ALIGN_STATUS_UPDATED] |=
			DPCD_INTERLANE_ALIGN_DONE;
	} else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
			DPCD_LINK_TRAINING_DISABLED) {
		/* finish link training */
		/* set sink status as synchronized */
		dpcd->data[DPCD_SINK_STATUS] = DPCD_SINK_IN_SYNC;
	}
}

#define _REG_HSW_DP_AUX_CH_CTL(dp) \
	((dp) ? (_PCH_DPB_AUX_CH_CTL + ((dp)-1)*0x100) : 0x64010)

#define _REG_SKL_DP_AUX_CH_CTL(dp) (0x64010 + (dp) * 0x100)

#define OFFSET_TO_DP_AUX_PORT(offset) (((offset) & 0xF00) >> 8)

#define dpy_is_valid_port(port) \
	(((port) >= PORT_A) && ((port) < I915_MAX_PORTS))
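/*
 * Each AUX channel is a 0x100-byte block: the control register sits at the
 * base and five 32-bit data registers follow at base + 0x4 through
 * base + 0x14, giving the 20-byte message buffer that the AUX_BURST_SIZE
 * checks below guard. OFFSET_TO_DP_AUX_PORT() pulls the port index out of
 * bits 11:8 of the offset.
 */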
static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	struct intel_vgpu_display *display = &vgpu->display;
	int msg, addr, ctrl, op, len;
	int port_index = OFFSET_TO_DP_AUX_PORT(offset);
	struct intel_vgpu_dpcd_data *dpcd = NULL;
	struct intel_vgpu_port *port = NULL;
	u32 data;

	if (!dpy_is_valid_port(port_index)) {
		gvt_vgpu_err("Unsupported DP port access!\n");
		return 0;
	}

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if ((IS_SKYLAKE(vgpu->gvt->dev_priv)
		|| IS_KABYLAKE(vgpu->gvt->dev_priv))
		&& offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
		/* SKL DPB/C/D aux ctl register changed */
		return 0;
	} else if (IS_BROADWELL(vgpu->gvt->dev_priv) &&
		offset != _REG_HSW_DP_AUX_CH_CTL(port_index)) {
		/* write to the data registers */
		return 0;
	}

	if (!(data & DP_AUX_CH_CTL_SEND_BUSY)) {
		/* just want to clear the sticky bits */
		vgpu_vreg(vgpu, offset) = 0;
		return 0;
	}

	port = &display->ports[port_index];
	dpcd = port->dpcd;

	/* read out message from DATA1 register */
	msg = vgpu_vreg(vgpu, offset + 4);
	addr = (msg >> 8) & 0xffff;
	ctrl = (msg >> 24) & 0xff;
	len = msg & 0xff;
	op = ctrl >> 4;

	if (op == GVT_AUX_NATIVE_WRITE) {
		int t;
		uint8_t buf[16];

		if ((addr + len + 1) >= DPCD_SIZE) {
			/*
			 * Write request exceeds what we support.
			 * DPCD spec: When a Source Device is writing a DPCD
			 * address not supported by the Sink Device, the Sink
			 * Device shall reply with AUX NACK and "M" equal to
			 * zero.
			 */

			/* NAK the write */
			vgpu_vreg(vgpu, offset + 4) = AUX_NATIVE_REPLY_NAK;
			dp_aux_ch_ctl_trans_done(vgpu, data, offset, 2, true);
			return 0;
		}

		/*
		 * Write request format: (command + address) occupies
		 * 3 bytes, followed by (len + 1) bytes of data.
		 */
		if (WARN_ON((len + 4) > AUX_BURST_SIZE))
			return -EINVAL;

		/* unpack data from vreg to buf */
		for (t = 0; t < 4; t++) {
			u32 r = vgpu_vreg(vgpu, offset + 8 + t * 4);

			buf[t * 4] = (r >> 24) & 0xff;
			buf[t * 4 + 1] = (r >> 16) & 0xff;
			buf[t * 4 + 2] = (r >> 8) & 0xff;
			buf[t * 4 + 3] = r & 0xff;
		}

		/* write to virtual DPCD */
		if (dpcd && dpcd->data_valid) {
			for (t = 0; t <= len; t++) {
				int p = addr + t;

				dpcd->data[p] = buf[t];
				/* check for link training */
				if (p == DPCD_TRAINING_PATTERN_SET)
					dp_aux_ch_ctl_link_training(dpcd,
							buf[t]);
			}
		}

		/* ACK the write */
		vgpu_vreg(vgpu, offset + 4) = 0;
		dp_aux_ch_ctl_trans_done(vgpu, data, offset, 1,
				dpcd && dpcd->data_valid);
		return 0;
	}

	if (op == GVT_AUX_NATIVE_READ) {
		int idx, i, ret = 0;

		if ((addr + len + 1) >= DPCD_SIZE) {
			/*
			 * Read request exceeds what we support.
			 * DPCD spec: A Sink Device receiving a Native AUX CH
			 * read request for an unsupported DPCD address must
			 * reply with an AUX ACK and read data set equal to
			 * zero instead of replying with AUX NACK.
			 */

			/* ACK the READ */
			vgpu_vreg(vgpu, offset + 4) = 0;
			vgpu_vreg(vgpu, offset + 8) = 0;
			vgpu_vreg(vgpu, offset + 12) = 0;
			vgpu_vreg(vgpu, offset + 16) = 0;
			vgpu_vreg(vgpu, offset + 20) = 0;

			dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
					true);
			return 0;
		}

		for (idx = 1; idx <= 5; idx++) {
			/* clear the data registers */
			vgpu_vreg(vgpu, offset + 4 * idx) = 0;
		}

		/*
		 * Read reply format: ACK (1 byte) plus (len + 1) bytes of data.
		 */
		if (WARN_ON((len + 2) > AUX_BURST_SIZE))
			return -EINVAL;

		/* read from virtual DPCD to vreg */
		/* first 4 bytes: [ACK][addr][addr+1][addr+2] */
		if (dpcd && dpcd->data_valid) {
			for (i = 1; i <= (len + 1); i++) {
				int t;

				t = dpcd->data[addr + i - 1];
				t <<= (24 - 8 * (i % 4));
				ret |= t;
				if ((i % 4 == 3) || (i == (len + 1))) {
					vgpu_vreg(vgpu, offset +
							(i / 4 + 1) * 4) = ret;
					ret = 0;
				}
			}
		}
		dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
				dpcd && dpcd->data_valid);
		return 0;
	}

	/* i2c transaction starts */
	intel_gvt_i2c_handle_aux_ch_write(vgpu, port_index, offset, p_data);

	if (data & DP_AUX_CH_CTL_INTERRUPT)
		trigger_aux_channel_interrupt(vgpu, offset);
	return 0;
}

static int mbctl_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	*(u32 *)p_data &= (~GEN6_MBCTL_ENABLE_BOOT_FETCH);
	write_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	bool vga_disable;

	write_vreg(vgpu, offset, p_data, bytes);
	vga_disable = vgpu_vreg(vgpu, offset) & VGA_DISP_DISABLE;

	gvt_dbg_core("vgpu%d: %s VGA mode\n", vgpu->id,
			vga_disable ? "Disable" : "Enable");
	return 0;
}

static u32 read_virtual_sbi_register(struct intel_vgpu *vgpu,
		unsigned int sbi_offset)
{
	struct intel_vgpu_display *display = &vgpu->display;
	int num = display->sbi.number;
	int i;

	for (i = 0; i < num; ++i)
		if (display->sbi.registers[i].offset == sbi_offset)
			break;

	if (i == num)
		return 0;

	return display->sbi.registers[i].value;
}

static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
		unsigned int offset, u32 value)
{
	struct intel_vgpu_display *display = &vgpu->display;
	int num = display->sbi.number;
	int i;

	for (i = 0; i < num; ++i) {
		if (display->sbi.registers[i].offset == offset)
			break;
	}

	if (i == num) {
		if (num == SBI_REG_MAX) {
			gvt_vgpu_err("SBI caching has reached its maximum limit\n");
			return;
		}
		display->sbi.number++;
	}

	display->sbi.registers[i].offset = offset;
	display->sbi.registers[i].value = value;
}

static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	if (((vgpu_vreg(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
			SBI_OPCODE_SHIFT) == SBI_CMD_CRRD) {
		unsigned int sbi_offset = (vgpu_vreg(vgpu, SBI_ADDR) &
			SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
		vgpu_vreg(vgpu, offset) = read_virtual_sbi_register(vgpu,
				sbi_offset);
	}
	read_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	data &= ~(SBI_STAT_MASK << SBI_STAT_SHIFT);
	data |= SBI_READY;

	data &= ~(SBI_RESPONSE_MASK << SBI_RESPONSE_SHIFT);
	data |= SBI_RESPONSE_SUCCESS;

	vgpu_vreg(vgpu, offset) = data;

	if (((vgpu_vreg(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
			SBI_OPCODE_SHIFT) == SBI_CMD_CRWR) {
		unsigned int sbi_offset = (vgpu_vreg(vgpu, SBI_ADDR) &
			SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;

		write_virtual_sbi_register(vgpu, sbi_offset,
				vgpu_vreg(vgpu, SBI_DATA));
	}
	return 0;
}

#define _vgtif_reg(x) \
	(VGT_PVINFO_PAGE + offsetof(struct vgt_if, x))
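/*
 * _vgtif_reg(x) resolves to the MMIO offset of field x inside the PVINFO
 * page shared with a paravirtualized guest: VGT_PVINFO_PAGE plus the byte
 * offset of x within struct vgt_if. For instance, _vgtif_reg(g2v_notify)
 * is the register a guest writes to raise the g2v notifications handled
 * below.
 */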
  922. static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
  923. void *p_data, unsigned int bytes)
  924. {
  925. bool invalid_read = false;
  926. read_vreg(vgpu, offset, p_data, bytes);
  927. switch (offset) {
  928. case _vgtif_reg(magic) ... _vgtif_reg(vgt_id):
  929. if (offset + bytes > _vgtif_reg(vgt_id) + 4)
  930. invalid_read = true;
  931. break;
  932. case _vgtif_reg(avail_rs.mappable_gmadr.base) ...
  933. _vgtif_reg(avail_rs.fence_num):
  934. if (offset + bytes >
  935. _vgtif_reg(avail_rs.fence_num) + 4)
  936. invalid_read = true;
  937. break;
  938. case 0x78010: /* vgt_caps */
  939. case 0x7881c:
  940. break;
  941. default:
  942. invalid_read = true;
  943. break;
  944. }
  945. if (invalid_read)
  946. gvt_vgpu_err("invalid pvinfo read: [%x:%x] = %x\n",
  947. offset, bytes, *(u32 *)p_data);
  948. vgpu->pv_notified = true;
  949. return 0;
  950. }
  951. static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
  952. {
  953. int ret = 0;
  954. switch (notification) {
  955. case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
  956. ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu, 3);
  957. break;
  958. case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY:
  959. ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 3);
  960. break;
  961. case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
  962. ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu, 4);
  963. break;
  964. case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY:
  965. ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 4);
  966. break;
  967. case VGT_G2V_EXECLIST_CONTEXT_CREATE:
  968. case VGT_G2V_EXECLIST_CONTEXT_DESTROY:
  969. case 1: /* Remove this in guest driver. */
  970. break;
  971. default:
  972. gvt_vgpu_err("Invalid PV notification %d\n", notification);
  973. }
  974. return ret;
  975. }
  976. static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
  977. {
  978. struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
  979. struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
  980. char *env[3] = {NULL, NULL, NULL};
  981. char vmid_str[20];
  982. char display_ready_str[20];
  983. snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d", ready);
  984. env[0] = display_ready_str;
  985. snprintf(vmid_str, 20, "VMID=%d", vgpu->id);
  986. env[1] = vmid_str;
  987. return kobject_uevent_env(kobj, KOBJ_ADD, env);
  988. }
  989. static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
  990. void *p_data, unsigned int bytes)
  991. {
  992. u32 data;
  993. int ret;
  994. write_vreg(vgpu, offset, p_data, bytes);
  995. data = vgpu_vreg(vgpu, offset);
  996. switch (offset) {
  997. case _vgtif_reg(display_ready):
  998. send_display_ready_uevent(vgpu, data ? 1 : 0);
  999. break;
  1000. case _vgtif_reg(g2v_notify):
  1001. ret = handle_g2v_notification(vgpu, data);
  1002. break;
  1003. /* add xhot and yhot to handled list to avoid error log */
  1004. case 0x78830:
  1005. case 0x78834:
  1006. case _vgtif_reg(pdp[0].lo):
  1007. case _vgtif_reg(pdp[0].hi):
  1008. case _vgtif_reg(pdp[1].lo):
  1009. case _vgtif_reg(pdp[1].hi):
  1010. case _vgtif_reg(pdp[2].lo):
  1011. case _vgtif_reg(pdp[2].hi):
  1012. case _vgtif_reg(pdp[3].lo):
  1013. case _vgtif_reg(pdp[3].hi):
  1014. case _vgtif_reg(execlist_context_descriptor_lo):
  1015. case _vgtif_reg(execlist_context_descriptor_hi):
  1016. break;
  1017. case _vgtif_reg(rsv5[0])..._vgtif_reg(rsv5[3]):
  1018. enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
  1019. break;
  1020. default:
  1021. gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n",
  1022. offset, bytes, data);
  1023. break;
  1024. }
  1025. return 0;
  1026. }
  1027. static int pf_write(struct intel_vgpu *vgpu,
  1028. unsigned int offset, void *p_data, unsigned int bytes)
  1029. {
  1030. u32 val = *(u32 *)p_data;
  1031. if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL ||
  1032. offset == _PS_1B_CTRL || offset == _PS_2B_CTRL ||
  1033. offset == _PS_1C_CTRL) && (val & PS_PLANE_SEL_MASK) != 0) {
  1034. WARN_ONCE(true, "VM(%d): guest is trying to scaling a plane\n",
  1035. vgpu->id);
  1036. return 0;
  1037. }
  1038. return intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);
  1039. }
  1040. static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
  1041. unsigned int offset, void *p_data, unsigned int bytes)
  1042. {
  1043. write_vreg(vgpu, offset, p_data, bytes);
  1044. if (vgpu_vreg(vgpu, offset) & HSW_PWR_WELL_CTL_REQ(HSW_DISP_PW_GLOBAL))
  1045. vgpu_vreg(vgpu, offset) |=
  1046. HSW_PWR_WELL_CTL_STATE(HSW_DISP_PW_GLOBAL);
  1047. else
  1048. vgpu_vreg(vgpu, offset) &=
  1049. ~HSW_PWR_WELL_CTL_STATE(HSW_DISP_PW_GLOBAL);
  1050. return 0;
  1051. }
  1052. static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
  1053. unsigned int offset, void *p_data, unsigned int bytes)
  1054. {
  1055. write_vreg(vgpu, offset, p_data, bytes);
  1056. if (vgpu_vreg(vgpu, offset) & FPGA_DBG_RM_NOCLAIM)
  1057. vgpu_vreg(vgpu, offset) &= ~FPGA_DBG_RM_NOCLAIM;
  1058. return 0;
  1059. }
  1060. static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
  1061. void *p_data, unsigned int bytes)
  1062. {
  1063. u32 mode;
  1064. write_vreg(vgpu, offset, p_data, bytes);
  1065. mode = vgpu_vreg(vgpu, offset);
  1066. if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
  1067. WARN_ONCE(1, "VM(%d): iGVT-g doesn't support GuC\n",
  1068. vgpu->id);
  1069. return 0;
  1070. }
  1071. return 0;
  1072. }
  1073. static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
  1074. void *p_data, unsigned int bytes)
  1075. {
  1076. struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
  1077. u32 trtte = *(u32 *)p_data;
  1078. if ((trtte & 1) && (trtte & (1 << 1)) == 0) {
  1079. WARN(1, "VM(%d): Use physical address for TRTT!\n",
  1080. vgpu->id);
  1081. return -EINVAL;
  1082. }
  1083. write_vreg(vgpu, offset, p_data, bytes);
  1084. /* TRTTE is not per-context */
  1085. mmio_hw_access_pre(dev_priv);
  1086. I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
  1087. mmio_hw_access_post(dev_priv);
  1088. return 0;
  1089. }
  1090. static int gen9_trtt_chicken_write(struct intel_vgpu *vgpu, unsigned int offset,
  1091. void *p_data, unsigned int bytes)
  1092. {
  1093. struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
  1094. u32 val = *(u32 *)p_data;
  1095. if (val & 1) {
  1096. /* unblock hw logic */
  1097. mmio_hw_access_pre(dev_priv);
  1098. I915_WRITE(_MMIO(offset), val);
  1099. mmio_hw_access_post(dev_priv);
  1100. }
  1101. write_vreg(vgpu, offset, p_data, bytes);
  1102. return 0;
  1103. }
  1104. static int dpll_status_read(struct intel_vgpu *vgpu, unsigned int offset,
  1105. void *p_data, unsigned int bytes)
  1106. {
  1107. u32 v = 0;
  1108. if (vgpu_vreg(vgpu, 0x46010) & (1 << 31))
  1109. v |= (1 << 0);
  1110. if (vgpu_vreg(vgpu, 0x46014) & (1 << 31))
  1111. v |= (1 << 8);
  1112. if (vgpu_vreg(vgpu, 0x46040) & (1 << 31))
  1113. v |= (1 << 16);
  1114. if (vgpu_vreg(vgpu, 0x46060) & (1 << 31))
  1115. v |= (1 << 24);
  1116. vgpu_vreg(vgpu, offset) = v;
  1117. return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
  1118. }
  1119. static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
  1120. void *p_data, unsigned int bytes)
  1121. {
  1122. u32 value = *(u32 *)p_data;
  1123. u32 cmd = value & 0xff;
  1124. u32 *data0 = &vgpu_vreg(vgpu, GEN6_PCODE_DATA);
  1125. switch (cmd) {
  1126. case GEN9_PCODE_READ_MEM_LATENCY:
  1127. if (IS_SKYLAKE(vgpu->gvt->dev_priv)
  1128. || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
  1129. /**
  1130. * "Read memory latency" command on gen9.
  1131. * Below memory latency values are read
  1132. * from skylake platform.
  1133. */
  1134. if (!*data0)
  1135. *data0 = 0x1e1a1100;
  1136. else
  1137. *data0 = 0x61514b3d;
  1138. }
  1139. break;
  1140. case SKL_PCODE_CDCLK_CONTROL:
  1141. if (IS_SKYLAKE(vgpu->gvt->dev_priv)
  1142. || IS_KABYLAKE(vgpu->gvt->dev_priv))
  1143. *data0 = SKL_CDCLK_READY_FOR_CHANGE;
  1144. break;
  1145. case GEN6_PCODE_READ_RC6VIDS:
  1146. *data0 |= 0x1;
  1147. break;
  1148. }
  1149. gvt_dbg_core("VM(%d) write %x to mailbox, return data0 %x\n",
  1150. vgpu->id, value, *data0);
  1151. /**
  1152. * PCODE_READY clear means ready for pcode read/write,
  1153. * PCODE_ERROR_MASK clear means no error happened. In GVT-g we
  1154. * always emulate as pcode read/write success and ready for access
  1155. * anytime, since we don't touch real physical registers here.
  1156. */
  1157. value &= ~(GEN6_PCODE_READY | GEN6_PCODE_ERROR_MASK);
  1158. return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
  1159. }
  1160. static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
  1161. unsigned int offset, void *p_data, unsigned int bytes)
  1162. {
  1163. u32 v = *(u32 *)p_data;
  1164. v &= (1 << 31) | (1 << 29) | (1 << 9) |
  1165. (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
  1166. v |= (v >> 1);
  1167. return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
  1168. }
  1169. static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
  1170. void *p_data, unsigned int bytes)
  1171. {
  1172. struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
  1173. u32 v = *(u32 *)p_data;
  1174. if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
  1175. return intel_vgpu_default_mmio_write(vgpu,
  1176. offset, p_data, bytes);
  1177. switch (offset) {
  1178. case 0x4ddc:
  1179. /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
  1180. vgpu_vreg(vgpu, offset) = v & ~(1 << 31);
  1181. break;
  1182. case 0x42080:
  1183. /* bypass WaCompressedResourceDisplayNewHashMode */
  1184. vgpu_vreg(vgpu, offset) = v & ~(1 << 15);
  1185. break;
  1186. case 0xe194:
  1187. /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
  1188. vgpu_vreg(vgpu, offset) = v & ~(1 << 8);
  1189. break;
  1190. case 0x7014:
  1191. /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
  1192. vgpu_vreg(vgpu, offset) = v & ~(1 << 13);
  1193. break;
  1194. default:
  1195. return -EINVAL;
  1196. }
  1197. return 0;
  1198. }
  1199. static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
  1200. void *p_data, unsigned int bytes)
  1201. {
  1202. u32 v = *(u32 *)p_data;
  1203. /* other bits are MBZ. */
  1204. v &= (1 << 31) | (1 << 30);
  1205. v & (1 << 31) ? (v |= (1 << 30)) : (v &= ~(1 << 30));
  1206. vgpu_vreg(vgpu, offset) = v;
  1207. return 0;
  1208. }
  1209. static int mmio_read_from_hw(struct intel_vgpu *vgpu,
  1210. unsigned int offset, void *p_data, unsigned int bytes)
  1211. {
  1212. struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
  1213. mmio_hw_access_pre(dev_priv);
  1214. vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
  1215. mmio_hw_access_post(dev_priv);
  1216. return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
  1217. }
static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
	struct intel_vgpu_execlist *execlist;
	u32 data = *(u32 *)p_data;
	int ret = 0;

	if (WARN_ON(ring_id < 0 || ring_id > I915_NUM_ENGINES - 1))
		return -EINVAL;

	execlist = &vgpu->execlist[ring_id];

	execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
	if (execlist->elsp_dwords.index == 3) {
		ret = intel_vgpu_submit_execlist(vgpu, ring_id);
		if (ret)
			gvt_vgpu_err("failed to submit workload on ring %d\n",
				     ring_id);
	}

	++execlist->elsp_dwords.index;
	execlist->elsp_dwords.index &= 0x3;
	return ret;
}
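
/*
 * RING_MODE registers are masked: the upper 16 bits select which of
 * the lower 16 bits a write actually changes, hence the
 * _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() tests below.
 */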
static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 data = *(u32 *)p_data;
	int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
	bool enable_execlist;

	write_vreg(vgpu, offset, p_data, bytes);

	/* When PPGTT mode is enabled, check whether the guest has called
	 * pvinfo; if not, treat it as a non-GVT-g-aware guest and stop
	 * emulating its cfg space, MMIO, GTT, etc.
	 */
	if (((data & _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)) ||
	     (data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)))
	    && !vgpu->pv_notified) {
		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
		return 0;
	}

	if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))
	    || (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
		enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);

		gvt_dbg_core("EXECLIST %s on ring %d\n",
			     (enable_execlist ? "enabling" : "disabling"),
			     ring_id);

		if (enable_execlist)
			intel_vgpu_start_schedule(vgpu);
	}
	return 0;
}
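
/*
 * Guest TLB invalidation: map the per-engine TLB control offset to an
 * engine id and mark that engine's invalidation as pending; the real
 * flush is deferred and performed outside this write handler.
 */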
static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	unsigned int id = 0;

	write_vreg(vgpu, offset, p_data, bytes);
	vgpu_vreg(vgpu, offset) = 0;

	switch (offset) {
	case 0x4260:
		id = RCS;
		break;
	case 0x4264:
		id = VCS;
		break;
	case 0x4268:
		id = VCS2;
		break;
	case 0x426c:
		id = BCS;
		break;
	case 0x4270:
		id = VECS;
		break;
	default:
		return -EINVAL;
	}
	set_bit(id, (void *)vgpu->tlb_handle_pending);

	return 0;
}
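
/*
 * RESET_CTL emulation: acknowledge a guest reset request immediately
 * by toggling READY_TO_RESET in the vreg to match the request, since
 * no real engine reset is carried out on behalf of the vGPU here.
 */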
static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (data & _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET))
		data |= RESET_CTL_READY_TO_RESET;
	else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
		data &= ~RESET_CTL_READY_TO_RESET;

	vgpu_vreg(vgpu, offset) = data;
	return 0;
}
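
/*
 * Registration helpers. MMIO_F(reg, size, flags, addr_mask, ro_mask,
 * device, read, write) registers a single MMIO info entry; the
 * wrappers cover the common cases: _D is a plain 4-byte register,
 * _DH adds read/write handlers, _DFH adds flags, _GM marks a graphics
 * memory address register, _RO declares read-only bits, and the
 * MMIO_RING_* variants repeat the registration for each engine's
 * ring base (including BSD2 when present).
 */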
#define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
	ret = new_mmio_info(gvt, INTEL_GVT_MMIO_OFFSET(reg), \
		f, s, am, rm, d, r, w); \
	if (ret) \
		return ret; \
} while (0)

#define MMIO_D(reg, d) \
	MMIO_F(reg, 4, 0, 0, 0, d, NULL, NULL)

#define MMIO_DH(reg, d, r, w) \
	MMIO_F(reg, 4, 0, 0, 0, d, r, w)

#define MMIO_DFH(reg, d, f, r, w) \
	MMIO_F(reg, 4, f, 0, 0, d, r, w)

#define MMIO_GM(reg, d, r, w) \
	MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w)

#define MMIO_GM_RDR(reg, d, r, w) \
	MMIO_F(reg, 4, F_GMADR | F_CMD_ACCESS, 0xFFFFF000, 0, d, r, w)

#define MMIO_RO(reg, d, f, rm, r, w) \
	MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w)

#define MMIO_RING_F(prefix, s, f, am, rm, d, r, w) do { \
	MMIO_F(prefix(RENDER_RING_BASE), s, f, am, rm, d, r, w); \
	MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
	MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
	MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
	if (HAS_BSD2(dev_priv)) \
		MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \
} while (0)

#define MMIO_RING_D(prefix, d) \
	MMIO_RING_F(prefix, 4, 0, 0, 0, d, NULL, NULL)

#define MMIO_RING_DFH(prefix, d, f, r, w) \
	MMIO_RING_F(prefix, 4, f, 0, 0, d, r, w)

#define MMIO_RING_GM(prefix, d, r, w) \
	MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w)

#define MMIO_RING_GM_RDR(prefix, d, r, w) \
	MMIO_RING_F(prefix, 4, F_GMADR | F_CMD_ACCESS, 0xFFFF0000, 0, d, r, w)

#define MMIO_RING_RO(prefix, d, f, rm, r, w) \
	MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w)
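
/* Registers common to all supported platforms (mostly D_ALL). */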
static int init_generic_mmio_info(struct intel_gvt *gvt)
{
	struct drm_i915_private *dev_priv = gvt->dev_priv;
	int ret;

	MMIO_RING_DFH(RING_IMR, D_ALL, F_CMD_ACCESS, NULL,
		intel_vgpu_reg_imr_handler);

	MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
	MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler);
	MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);
	MMIO_D(SDEISR, D_ALL);

	MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL);

	MMIO_GM_RDR(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
	MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
	MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
	MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);

#define RING_REG(base) (base + 0x28)
	MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
#undef RING_REG

#define RING_REG(base) (base + 0x134)
	MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
#undef RING_REG

#define RING_REG(base) (base + 0x6c)
	MMIO_RING_DFH(RING_REG, D_ALL, 0, mmio_read_from_hw, NULL);
#undef RING_REG

	MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL);

	MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL);
	MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
	MMIO_GM_RDR(0x12198, D_ALL, NULL, NULL);
	MMIO_D(GEN7_CXT_SIZE, D_ALL);

	MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_RING_DFH(RING_HEAD, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_RING_DFH(RING_CTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, mmio_read_from_hw, NULL);
	MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL);

	/* RING MODE */
#define RING_REG(base) (base + 0x29c)
	MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
		ring_mode_mmio_write);
#undef RING_REG

	MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
		NULL, NULL);
	MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
		NULL, NULL);
	MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS,
		mmio_read_from_hw, NULL);
	MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS,
		mmio_read_from_hw, NULL);

	MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
		NULL, NULL);
	MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(CACHE_MODE_0, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x2124, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);

	MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x2088, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x2470, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
		NULL, NULL);
	MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
		skl_misc_ctl_write);
	MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x2430, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x2434, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x2438, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x243c, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x7018, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);

	/* display */
	MMIO_F(0x60220, 0x20, 0, 0, 0, D_ALL, NULL, NULL);
	MMIO_D(0x602a0, D_ALL);

	MMIO_D(0x65050, D_ALL);
	MMIO_D(0x650b4, D_ALL);

	MMIO_D(0xc4040, D_ALL);
	MMIO_D(DERRMR, D_ALL);

	MMIO_D(PIPEDSL(PIPE_A), D_ALL);
	MMIO_D(PIPEDSL(PIPE_B), D_ALL);
	MMIO_D(PIPEDSL(PIPE_C), D_ALL);
	MMIO_D(PIPEDSL(_PIPE_EDP), D_ALL);

	MMIO_DH(PIPECONF(PIPE_A), D_ALL, NULL, pipeconf_mmio_write);
	MMIO_DH(PIPECONF(PIPE_B), D_ALL, NULL, pipeconf_mmio_write);
	MMIO_DH(PIPECONF(PIPE_C), D_ALL, NULL, pipeconf_mmio_write);
	MMIO_DH(PIPECONF(_PIPE_EDP), D_ALL, NULL, pipeconf_mmio_write);

	MMIO_D(PIPESTAT(PIPE_A), D_ALL);
	MMIO_D(PIPESTAT(PIPE_B), D_ALL);
	MMIO_D(PIPESTAT(PIPE_C), D_ALL);
	MMIO_D(PIPESTAT(_PIPE_EDP), D_ALL);

	MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_A), D_ALL);
	MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_B), D_ALL);
	MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_C), D_ALL);
	MMIO_D(PIPE_FLIPCOUNT_G4X(_PIPE_EDP), D_ALL);

	MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_A), D_ALL);
	MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_B), D_ALL);
	MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_C), D_ALL);
	MMIO_D(PIPE_FRMCOUNT_G4X(_PIPE_EDP), D_ALL);

	MMIO_D(CURCNTR(PIPE_A), D_ALL);
	MMIO_D(CURCNTR(PIPE_B), D_ALL);
	MMIO_D(CURCNTR(PIPE_C), D_ALL);

	MMIO_D(CURPOS(PIPE_A), D_ALL);
	MMIO_D(CURPOS(PIPE_B), D_ALL);
	MMIO_D(CURPOS(PIPE_C), D_ALL);

	MMIO_D(CURBASE(PIPE_A), D_ALL);
	MMIO_D(CURBASE(PIPE_B), D_ALL);
	MMIO_D(CURBASE(PIPE_C), D_ALL);

	MMIO_D(0x700ac, D_ALL);
	MMIO_D(0x710ac, D_ALL);
	MMIO_D(0x720ac, D_ALL);

	MMIO_D(0x70090, D_ALL);
	MMIO_D(0x70094, D_ALL);
	MMIO_D(0x70098, D_ALL);
	MMIO_D(0x7009c, D_ALL);

	MMIO_D(DSPCNTR(PIPE_A), D_ALL);
	MMIO_D(DSPADDR(PIPE_A), D_ALL);
	MMIO_D(DSPSTRIDE(PIPE_A), D_ALL);
	MMIO_D(DSPPOS(PIPE_A), D_ALL);
	MMIO_D(DSPSIZE(PIPE_A), D_ALL);
	MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
	MMIO_D(DSPOFFSET(PIPE_A), D_ALL);
	MMIO_D(DSPSURFLIVE(PIPE_A), D_ALL);

	MMIO_D(DSPCNTR(PIPE_B), D_ALL);
	MMIO_D(DSPADDR(PIPE_B), D_ALL);
	MMIO_D(DSPSTRIDE(PIPE_B), D_ALL);
	MMIO_D(DSPPOS(PIPE_B), D_ALL);
	MMIO_D(DSPSIZE(PIPE_B), D_ALL);
	MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
	MMIO_D(DSPOFFSET(PIPE_B), D_ALL);
	MMIO_D(DSPSURFLIVE(PIPE_B), D_ALL);

	MMIO_D(DSPCNTR(PIPE_C), D_ALL);
	MMIO_D(DSPADDR(PIPE_C), D_ALL);
	MMIO_D(DSPSTRIDE(PIPE_C), D_ALL);
	MMIO_D(DSPPOS(PIPE_C), D_ALL);
	MMIO_D(DSPSIZE(PIPE_C), D_ALL);
	MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
	MMIO_D(DSPOFFSET(PIPE_C), D_ALL);
	MMIO_D(DSPSURFLIVE(PIPE_C), D_ALL);

	MMIO_D(SPRCTL(PIPE_A), D_ALL);
	MMIO_D(SPRLINOFF(PIPE_A), D_ALL);
	MMIO_D(SPRSTRIDE(PIPE_A), D_ALL);
	MMIO_D(SPRPOS(PIPE_A), D_ALL);
	MMIO_D(SPRSIZE(PIPE_A), D_ALL);
	MMIO_D(SPRKEYVAL(PIPE_A), D_ALL);
	MMIO_D(SPRKEYMSK(PIPE_A), D_ALL);
	MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write);
	MMIO_D(SPRKEYMAX(PIPE_A), D_ALL);
	MMIO_D(SPROFFSET(PIPE_A), D_ALL);
	MMIO_D(SPRSCALE(PIPE_A), D_ALL);
	MMIO_D(SPRSURFLIVE(PIPE_A), D_ALL);

	MMIO_D(SPRCTL(PIPE_B), D_ALL);
	MMIO_D(SPRLINOFF(PIPE_B), D_ALL);
	MMIO_D(SPRSTRIDE(PIPE_B), D_ALL);
	MMIO_D(SPRPOS(PIPE_B), D_ALL);
	MMIO_D(SPRSIZE(PIPE_B), D_ALL);
	MMIO_D(SPRKEYVAL(PIPE_B), D_ALL);
	MMIO_D(SPRKEYMSK(PIPE_B), D_ALL);
	MMIO_DH(SPRSURF(PIPE_B), D_ALL, NULL, spr_surf_mmio_write);
	MMIO_D(SPRKEYMAX(PIPE_B), D_ALL);
	MMIO_D(SPROFFSET(PIPE_B), D_ALL);
	MMIO_D(SPRSCALE(PIPE_B), D_ALL);
	MMIO_D(SPRSURFLIVE(PIPE_B), D_ALL);

	MMIO_D(SPRCTL(PIPE_C), D_ALL);
	MMIO_D(SPRLINOFF(PIPE_C), D_ALL);
	MMIO_D(SPRSTRIDE(PIPE_C), D_ALL);
	MMIO_D(SPRPOS(PIPE_C), D_ALL);
	MMIO_D(SPRSIZE(PIPE_C), D_ALL);
	MMIO_D(SPRKEYVAL(PIPE_C), D_ALL);
	MMIO_D(SPRKEYMSK(PIPE_C), D_ALL);
	MMIO_DH(SPRSURF(PIPE_C), D_ALL, NULL, spr_surf_mmio_write);
	MMIO_D(SPRKEYMAX(PIPE_C), D_ALL);
	MMIO_D(SPROFFSET(PIPE_C), D_ALL);
	MMIO_D(SPRSCALE(PIPE_C), D_ALL);
	MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL);

	MMIO_D(HTOTAL(TRANSCODER_A), D_ALL);
	MMIO_D(HBLANK(TRANSCODER_A), D_ALL);
	MMIO_D(HSYNC(TRANSCODER_A), D_ALL);
	MMIO_D(VTOTAL(TRANSCODER_A), D_ALL);
	MMIO_D(VBLANK(TRANSCODER_A), D_ALL);
	MMIO_D(VSYNC(TRANSCODER_A), D_ALL);
	MMIO_D(BCLRPAT(TRANSCODER_A), D_ALL);
	MMIO_D(VSYNCSHIFT(TRANSCODER_A), D_ALL);
	MMIO_D(PIPESRC(TRANSCODER_A), D_ALL);

	MMIO_D(HTOTAL(TRANSCODER_B), D_ALL);
	MMIO_D(HBLANK(TRANSCODER_B), D_ALL);
	MMIO_D(HSYNC(TRANSCODER_B), D_ALL);
	MMIO_D(VTOTAL(TRANSCODER_B), D_ALL);
	MMIO_D(VBLANK(TRANSCODER_B), D_ALL);
	MMIO_D(VSYNC(TRANSCODER_B), D_ALL);
	MMIO_D(BCLRPAT(TRANSCODER_B), D_ALL);
	MMIO_D(VSYNCSHIFT(TRANSCODER_B), D_ALL);
	MMIO_D(PIPESRC(TRANSCODER_B), D_ALL);

	MMIO_D(HTOTAL(TRANSCODER_C), D_ALL);
	MMIO_D(HBLANK(TRANSCODER_C), D_ALL);
	MMIO_D(HSYNC(TRANSCODER_C), D_ALL);
	MMIO_D(VTOTAL(TRANSCODER_C), D_ALL);
	MMIO_D(VBLANK(TRANSCODER_C), D_ALL);
	MMIO_D(VSYNC(TRANSCODER_C), D_ALL);
	MMIO_D(BCLRPAT(TRANSCODER_C), D_ALL);
	MMIO_D(VSYNCSHIFT(TRANSCODER_C), D_ALL);
	MMIO_D(PIPESRC(TRANSCODER_C), D_ALL);

	MMIO_D(HTOTAL(TRANSCODER_EDP), D_ALL);
	MMIO_D(HBLANK(TRANSCODER_EDP), D_ALL);
	MMIO_D(HSYNC(TRANSCODER_EDP), D_ALL);
	MMIO_D(VTOTAL(TRANSCODER_EDP), D_ALL);
	MMIO_D(VBLANK(TRANSCODER_EDP), D_ALL);
	MMIO_D(VSYNC(TRANSCODER_EDP), D_ALL);
	MMIO_D(BCLRPAT(TRANSCODER_EDP), D_ALL);
	MMIO_D(VSYNCSHIFT(TRANSCODER_EDP), D_ALL);

	MMIO_D(PIPE_DATA_M1(TRANSCODER_A), D_ALL);
	MMIO_D(PIPE_DATA_N1(TRANSCODER_A), D_ALL);
	MMIO_D(PIPE_DATA_M2(TRANSCODER_A), D_ALL);
	MMIO_D(PIPE_DATA_N2(TRANSCODER_A), D_ALL);
	MMIO_D(PIPE_LINK_M1(TRANSCODER_A), D_ALL);
	MMIO_D(PIPE_LINK_N1(TRANSCODER_A), D_ALL);
	MMIO_D(PIPE_LINK_M2(TRANSCODER_A), D_ALL);
	MMIO_D(PIPE_LINK_N2(TRANSCODER_A), D_ALL);

	MMIO_D(PIPE_DATA_M1(TRANSCODER_B), D_ALL);
	MMIO_D(PIPE_DATA_N1(TRANSCODER_B), D_ALL);
	MMIO_D(PIPE_DATA_M2(TRANSCODER_B), D_ALL);
	MMIO_D(PIPE_DATA_N2(TRANSCODER_B), D_ALL);
	MMIO_D(PIPE_LINK_M1(TRANSCODER_B), D_ALL);
	MMIO_D(PIPE_LINK_N1(TRANSCODER_B), D_ALL);
	MMIO_D(PIPE_LINK_M2(TRANSCODER_B), D_ALL);
	MMIO_D(PIPE_LINK_N2(TRANSCODER_B), D_ALL);

	MMIO_D(PIPE_DATA_M1(TRANSCODER_C), D_ALL);
	MMIO_D(PIPE_DATA_N1(TRANSCODER_C), D_ALL);
	MMIO_D(PIPE_DATA_M2(TRANSCODER_C), D_ALL);
	MMIO_D(PIPE_DATA_N2(TRANSCODER_C), D_ALL);
	MMIO_D(PIPE_LINK_M1(TRANSCODER_C), D_ALL);
	MMIO_D(PIPE_LINK_N1(TRANSCODER_C), D_ALL);
	MMIO_D(PIPE_LINK_M2(TRANSCODER_C), D_ALL);
	MMIO_D(PIPE_LINK_N2(TRANSCODER_C), D_ALL);

	MMIO_D(PIPE_DATA_M1(TRANSCODER_EDP), D_ALL);
	MMIO_D(PIPE_DATA_N1(TRANSCODER_EDP), D_ALL);
	MMIO_D(PIPE_DATA_M2(TRANSCODER_EDP), D_ALL);
	MMIO_D(PIPE_DATA_N2(TRANSCODER_EDP), D_ALL);
	MMIO_D(PIPE_LINK_M1(TRANSCODER_EDP), D_ALL);
	MMIO_D(PIPE_LINK_N1(TRANSCODER_EDP), D_ALL);
	MMIO_D(PIPE_LINK_M2(TRANSCODER_EDP), D_ALL);
	MMIO_D(PIPE_LINK_N2(TRANSCODER_EDP), D_ALL);

	MMIO_D(PF_CTL(PIPE_A), D_ALL);
	MMIO_D(PF_WIN_SZ(PIPE_A), D_ALL);
	MMIO_D(PF_WIN_POS(PIPE_A), D_ALL);
	MMIO_D(PF_VSCALE(PIPE_A), D_ALL);
	MMIO_D(PF_HSCALE(PIPE_A), D_ALL);

	MMIO_D(PF_CTL(PIPE_B), D_ALL);
	MMIO_D(PF_WIN_SZ(PIPE_B), D_ALL);
	MMIO_D(PF_WIN_POS(PIPE_B), D_ALL);
	MMIO_D(PF_VSCALE(PIPE_B), D_ALL);
	MMIO_D(PF_HSCALE(PIPE_B), D_ALL);

	MMIO_D(PF_CTL(PIPE_C), D_ALL);
	MMIO_D(PF_WIN_SZ(PIPE_C), D_ALL);
	MMIO_D(PF_WIN_POS(PIPE_C), D_ALL);
	MMIO_D(PF_VSCALE(PIPE_C), D_ALL);
	MMIO_D(PF_HSCALE(PIPE_C), D_ALL);

	MMIO_D(WM0_PIPEA_ILK, D_ALL);
	MMIO_D(WM0_PIPEB_ILK, D_ALL);
	MMIO_D(WM0_PIPEC_IVB, D_ALL);
	MMIO_D(WM1_LP_ILK, D_ALL);
	MMIO_D(WM2_LP_ILK, D_ALL);
	MMIO_D(WM3_LP_ILK, D_ALL);
	MMIO_D(WM1S_LP_ILK, D_ALL);
	MMIO_D(WM2S_LP_IVB, D_ALL);
	MMIO_D(WM3S_LP_IVB, D_ALL);

	MMIO_D(BLC_PWM_CPU_CTL2, D_ALL);
	MMIO_D(BLC_PWM_CPU_CTL, D_ALL);
	MMIO_D(BLC_PWM_PCH_CTL1, D_ALL);
	MMIO_D(BLC_PWM_PCH_CTL2, D_ALL);

	MMIO_D(0x48268, D_ALL);

	MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read,
		gmbus_mmio_write);
	MMIO_F(PCH_GPIOA, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(0xe4f00, 0x28, 0, 0, 0, D_ALL, NULL, NULL);

	MMIO_F(_PCH_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
		dp_aux_ch_ctl_mmio_write);
	MMIO_F(_PCH_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
		dp_aux_ch_ctl_mmio_write);
	MMIO_F(_PCH_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
		dp_aux_ch_ctl_mmio_write);

	MMIO_DH(PCH_ADPA, D_PRE_SKL, NULL, pch_adpa_mmio_write);

	MMIO_DH(_PCH_TRANSACONF, D_ALL, NULL, transconf_mmio_write);
	MMIO_DH(_PCH_TRANSBCONF, D_ALL, NULL, transconf_mmio_write);

	MMIO_DH(FDI_RX_IIR(PIPE_A), D_ALL, NULL, fdi_rx_iir_mmio_write);
	MMIO_DH(FDI_RX_IIR(PIPE_B), D_ALL, NULL, fdi_rx_iir_mmio_write);
	MMIO_DH(FDI_RX_IIR(PIPE_C), D_ALL, NULL, fdi_rx_iir_mmio_write);
	MMIO_DH(FDI_RX_IMR(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
	MMIO_DH(FDI_RX_IMR(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
	MMIO_DH(FDI_RX_IMR(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
	MMIO_DH(FDI_RX_CTL(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
	MMIO_DH(FDI_RX_CTL(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
	MMIO_DH(FDI_RX_CTL(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);

	MMIO_D(_PCH_TRANS_HTOTAL_A, D_ALL);
	MMIO_D(_PCH_TRANS_HBLANK_A, D_ALL);
	MMIO_D(_PCH_TRANS_HSYNC_A, D_ALL);
	MMIO_D(_PCH_TRANS_VTOTAL_A, D_ALL);
	MMIO_D(_PCH_TRANS_VBLANK_A, D_ALL);
	MMIO_D(_PCH_TRANS_VSYNC_A, D_ALL);
	MMIO_D(_PCH_TRANS_VSYNCSHIFT_A, D_ALL);

	MMIO_D(_PCH_TRANS_HTOTAL_B, D_ALL);
	MMIO_D(_PCH_TRANS_HBLANK_B, D_ALL);
	MMIO_D(_PCH_TRANS_HSYNC_B, D_ALL);
	MMIO_D(_PCH_TRANS_VTOTAL_B, D_ALL);
	MMIO_D(_PCH_TRANS_VBLANK_B, D_ALL);
	MMIO_D(_PCH_TRANS_VSYNC_B, D_ALL);
	MMIO_D(_PCH_TRANS_VSYNCSHIFT_B, D_ALL);

	MMIO_D(_PCH_TRANSA_DATA_M1, D_ALL);
	MMIO_D(_PCH_TRANSA_DATA_N1, D_ALL);
	MMIO_D(_PCH_TRANSA_DATA_M2, D_ALL);
	MMIO_D(_PCH_TRANSA_DATA_N2, D_ALL);
	MMIO_D(_PCH_TRANSA_LINK_M1, D_ALL);
	MMIO_D(_PCH_TRANSA_LINK_N1, D_ALL);
	MMIO_D(_PCH_TRANSA_LINK_M2, D_ALL);
	MMIO_D(_PCH_TRANSA_LINK_N2, D_ALL);

	MMIO_D(TRANS_DP_CTL(PIPE_A), D_ALL);
	MMIO_D(TRANS_DP_CTL(PIPE_B), D_ALL);
	MMIO_D(TRANS_DP_CTL(PIPE_C), D_ALL);

	MMIO_D(TVIDEO_DIP_CTL(PIPE_A), D_ALL);
	MMIO_D(TVIDEO_DIP_DATA(PIPE_A), D_ALL);
	MMIO_D(TVIDEO_DIP_GCP(PIPE_A), D_ALL);

	MMIO_D(TVIDEO_DIP_CTL(PIPE_B), D_ALL);
	MMIO_D(TVIDEO_DIP_DATA(PIPE_B), D_ALL);
	MMIO_D(TVIDEO_DIP_GCP(PIPE_B), D_ALL);

	MMIO_D(TVIDEO_DIP_CTL(PIPE_C), D_ALL);
	MMIO_D(TVIDEO_DIP_DATA(PIPE_C), D_ALL);
	MMIO_D(TVIDEO_DIP_GCP(PIPE_C), D_ALL);

	MMIO_D(_FDI_RXA_MISC, D_ALL);
	MMIO_D(_FDI_RXB_MISC, D_ALL);
	MMIO_D(_FDI_RXA_TUSIZE1, D_ALL);
	MMIO_D(_FDI_RXA_TUSIZE2, D_ALL);
	MMIO_D(_FDI_RXB_TUSIZE1, D_ALL);
	MMIO_D(_FDI_RXB_TUSIZE2, D_ALL);

	MMIO_DH(PCH_PP_CONTROL, D_ALL, NULL, pch_pp_control_mmio_write);
	MMIO_D(PCH_PP_DIVISOR, D_ALL);
	MMIO_D(PCH_PP_STATUS, D_ALL);
	MMIO_D(PCH_LVDS, D_ALL);
	MMIO_D(_PCH_DPLL_A, D_ALL);
	MMIO_D(_PCH_DPLL_B, D_ALL);
	MMIO_D(_PCH_FPA0, D_ALL);
	MMIO_D(_PCH_FPA1, D_ALL);
	MMIO_D(_PCH_FPB0, D_ALL);
	MMIO_D(_PCH_FPB1, D_ALL);
	MMIO_D(PCH_DREF_CONTROL, D_ALL);
	MMIO_D(PCH_RAWCLK_FREQ, D_ALL);
	MMIO_D(PCH_DPLL_SEL, D_ALL);

	MMIO_D(0x61208, D_ALL);
	MMIO_D(0x6120c, D_ALL);
	MMIO_D(PCH_PP_ON_DELAYS, D_ALL);
	MMIO_D(PCH_PP_OFF_DELAYS, D_ALL);

	MMIO_DH(0xe651c, D_ALL, dpy_reg_mmio_read, NULL);
	MMIO_DH(0xe661c, D_ALL, dpy_reg_mmio_read, NULL);
	MMIO_DH(0xe671c, D_ALL, dpy_reg_mmio_read, NULL);
	MMIO_DH(0xe681c, D_ALL, dpy_reg_mmio_read, NULL);
	MMIO_DH(0xe6c04, D_ALL, dpy_reg_mmio_read, NULL);
	MMIO_DH(0xe6e1c, D_ALL, dpy_reg_mmio_read, NULL);

	MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0,
		PORTA_HOTPLUG_STATUS_MASK
		| PORTB_HOTPLUG_STATUS_MASK
		| PORTC_HOTPLUG_STATUS_MASK
		| PORTD_HOTPLUG_STATUS_MASK,
		NULL, NULL);

	MMIO_DH(LCPLL_CTL, D_ALL, NULL, lcpll_ctl_mmio_write);
	MMIO_D(FUSE_STRAP, D_ALL);
	MMIO_D(DIGITAL_PORT_HOTPLUG_CNTRL, D_ALL);

	MMIO_D(DISP_ARB_CTL, D_ALL);
	MMIO_D(DISP_ARB_CTL2, D_ALL);

	MMIO_D(ILK_DISPLAY_CHICKEN1, D_ALL);
	MMIO_D(ILK_DISPLAY_CHICKEN2, D_ALL);
	MMIO_D(ILK_DSPCLK_GATE_D, D_ALL);

	MMIO_D(SOUTH_CHICKEN1, D_ALL);
	MMIO_DH(SOUTH_CHICKEN2, D_ALL, NULL, south_chicken2_mmio_write);
	MMIO_D(_TRANSA_CHICKEN1, D_ALL);
	MMIO_D(_TRANSB_CHICKEN1, D_ALL);
	MMIO_D(SOUTH_DSPCLK_GATE_D, D_ALL);
	MMIO_D(_TRANSA_CHICKEN2, D_ALL);
	MMIO_D(_TRANSB_CHICKEN2, D_ALL);

	MMIO_D(ILK_DPFC_CB_BASE, D_ALL);
	MMIO_D(ILK_DPFC_CONTROL, D_ALL);
	MMIO_D(ILK_DPFC_RECOMP_CTL, D_ALL);
	MMIO_D(ILK_DPFC_STATUS, D_ALL);
	MMIO_D(ILK_DPFC_FENCE_YOFF, D_ALL);
	MMIO_D(ILK_DPFC_CHICKEN, D_ALL);
	MMIO_D(ILK_FBC_RT_BASE, D_ALL);

	MMIO_D(IPS_CTL, D_ALL);

	MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_A), D_ALL);
	MMIO_D(PIPE_CSC_COEFF_BY(PIPE_A), D_ALL);
	MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_A), D_ALL);
	MMIO_D(PIPE_CSC_COEFF_BU(PIPE_A), D_ALL);
	MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_A), D_ALL);
	MMIO_D(PIPE_CSC_COEFF_BV(PIPE_A), D_ALL);
	MMIO_D(PIPE_CSC_MODE(PIPE_A), D_ALL);
	MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_A), D_ALL);
	MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_A), D_ALL);
	MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_A), D_ALL);
	MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_A), D_ALL);
	MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_A), D_ALL);
	MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_A), D_ALL);

	MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_B), D_ALL);
	MMIO_D(PIPE_CSC_COEFF_BY(PIPE_B), D_ALL);
	MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_B), D_ALL);
	MMIO_D(PIPE_CSC_COEFF_BU(PIPE_B), D_ALL);
	MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_B), D_ALL);
	MMIO_D(PIPE_CSC_COEFF_BV(PIPE_B), D_ALL);
	MMIO_D(PIPE_CSC_MODE(PIPE_B), D_ALL);
	MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_B), D_ALL);
	MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_B), D_ALL);
	MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_B), D_ALL);
	MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_B), D_ALL);
	MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_B), D_ALL);
	MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_B), D_ALL);

	MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_C), D_ALL);
	MMIO_D(PIPE_CSC_COEFF_BY(PIPE_C), D_ALL);
	MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_C), D_ALL);
	MMIO_D(PIPE_CSC_COEFF_BU(PIPE_C), D_ALL);
	MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_C), D_ALL);
	MMIO_D(PIPE_CSC_COEFF_BV(PIPE_C), D_ALL);
	MMIO_D(PIPE_CSC_MODE(PIPE_C), D_ALL);
	MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_C), D_ALL);
	MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_C), D_ALL);
	MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_C), D_ALL);
	MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_C), D_ALL);
	MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_C), D_ALL);
	MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_C), D_ALL);

	MMIO_D(PREC_PAL_INDEX(PIPE_A), D_ALL);
	MMIO_D(PREC_PAL_DATA(PIPE_A), D_ALL);
	MMIO_F(PREC_PAL_GC_MAX(PIPE_A, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);

	MMIO_D(PREC_PAL_INDEX(PIPE_B), D_ALL);
	MMIO_D(PREC_PAL_DATA(PIPE_B), D_ALL);
	MMIO_F(PREC_PAL_GC_MAX(PIPE_B, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);

	MMIO_D(PREC_PAL_INDEX(PIPE_C), D_ALL);
	MMIO_D(PREC_PAL_DATA(PIPE_C), D_ALL);
	MMIO_F(PREC_PAL_GC_MAX(PIPE_C, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);

	MMIO_D(0x60110, D_ALL);
	MMIO_D(0x61110, D_ALL);
	MMIO_F(0x70400, 0x40, 0, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(0x71400, 0x40, 0, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(0x72400, 0x40, 0, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(0x70440, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
	MMIO_F(0x71440, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
	MMIO_F(0x72440, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
	MMIO_F(0x7044c, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
	MMIO_F(0x7144c, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
	MMIO_F(0x7244c, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);

	MMIO_D(PIPE_WM_LINETIME(PIPE_A), D_ALL);
	MMIO_D(PIPE_WM_LINETIME(PIPE_B), D_ALL);
	MMIO_D(PIPE_WM_LINETIME(PIPE_C), D_ALL);
	MMIO_D(SPLL_CTL, D_ALL);
	MMIO_D(_WRPLL_CTL1, D_ALL);
	MMIO_D(_WRPLL_CTL2, D_ALL);
	MMIO_D(PORT_CLK_SEL(PORT_A), D_ALL);
	MMIO_D(PORT_CLK_SEL(PORT_B), D_ALL);
	MMIO_D(PORT_CLK_SEL(PORT_C), D_ALL);
	MMIO_D(PORT_CLK_SEL(PORT_D), D_ALL);
	MMIO_D(PORT_CLK_SEL(PORT_E), D_ALL);
	MMIO_D(TRANS_CLK_SEL(TRANSCODER_A), D_ALL);
	MMIO_D(TRANS_CLK_SEL(TRANSCODER_B), D_ALL);
	MMIO_D(TRANS_CLK_SEL(TRANSCODER_C), D_ALL);

	MMIO_D(HSW_NDE_RSTWRN_OPT, D_ALL);
	MMIO_D(0x46508, D_ALL);

	MMIO_D(0x49080, D_ALL);
	MMIO_D(0x49180, D_ALL);
	MMIO_D(0x49280, D_ALL);

	MMIO_F(0x49090, 0x14, 0, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(0x49190, 0x14, 0, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(0x49290, 0x14, 0, 0, 0, D_ALL, NULL, NULL);

	MMIO_D(GAMMA_MODE(PIPE_A), D_ALL);
	MMIO_D(GAMMA_MODE(PIPE_B), D_ALL);
	MMIO_D(GAMMA_MODE(PIPE_C), D_ALL);

	MMIO_D(PIPE_MULT(PIPE_A), D_ALL);
	MMIO_D(PIPE_MULT(PIPE_B), D_ALL);
	MMIO_D(PIPE_MULT(PIPE_C), D_ALL);

	MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_A), D_ALL);
	MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_B), D_ALL);
	MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_C), D_ALL);

	MMIO_DH(SFUSE_STRAP, D_ALL, NULL, NULL);
	MMIO_D(SBI_ADDR, D_ALL);
	MMIO_DH(SBI_DATA, D_ALL, sbi_data_mmio_read, NULL);
	MMIO_DH(SBI_CTL_STAT, D_ALL, NULL, sbi_ctl_mmio_write);
	MMIO_D(PIXCLK_GATE, D_ALL);

	MMIO_F(_DPA_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_ALL, NULL,
		dp_aux_ch_ctl_mmio_write);

	MMIO_DH(DDI_BUF_CTL(PORT_A), D_ALL, NULL, ddi_buf_ctl_mmio_write);
	MMIO_DH(DDI_BUF_CTL(PORT_B), D_ALL, NULL, ddi_buf_ctl_mmio_write);
	MMIO_DH(DDI_BUF_CTL(PORT_C), D_ALL, NULL, ddi_buf_ctl_mmio_write);
	MMIO_DH(DDI_BUF_CTL(PORT_D), D_ALL, NULL, ddi_buf_ctl_mmio_write);
	MMIO_DH(DDI_BUF_CTL(PORT_E), D_ALL, NULL, ddi_buf_ctl_mmio_write);

	MMIO_DH(DP_TP_CTL(PORT_A), D_ALL, NULL, dp_tp_ctl_mmio_write);
	MMIO_DH(DP_TP_CTL(PORT_B), D_ALL, NULL, dp_tp_ctl_mmio_write);
	MMIO_DH(DP_TP_CTL(PORT_C), D_ALL, NULL, dp_tp_ctl_mmio_write);
	MMIO_DH(DP_TP_CTL(PORT_D), D_ALL, NULL, dp_tp_ctl_mmio_write);
	MMIO_DH(DP_TP_CTL(PORT_E), D_ALL, NULL, dp_tp_ctl_mmio_write);

	MMIO_DH(DP_TP_STATUS(PORT_A), D_ALL, NULL, dp_tp_status_mmio_write);
	MMIO_DH(DP_TP_STATUS(PORT_B), D_ALL, NULL, dp_tp_status_mmio_write);
	MMIO_DH(DP_TP_STATUS(PORT_C), D_ALL, NULL, dp_tp_status_mmio_write);
	MMIO_DH(DP_TP_STATUS(PORT_D), D_ALL, NULL, dp_tp_status_mmio_write);
	MMIO_DH(DP_TP_STATUS(PORT_E), D_ALL, NULL, NULL);

	MMIO_F(_DDI_BUF_TRANS_A, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(0x64e60, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(0x64ec0, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(0x64f20, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(0x64f80, 0x50, 0, 0, 0, D_ALL, NULL, NULL);

	MMIO_D(HSW_AUD_CFG(PIPE_A), D_ALL);
	MMIO_D(HSW_AUD_PIN_ELD_CP_VLD, D_ALL);

	MMIO_DH(_TRANS_DDI_FUNC_CTL_A, D_ALL, NULL, NULL);
	MMIO_DH(_TRANS_DDI_FUNC_CTL_B, D_ALL, NULL, NULL);
	MMIO_DH(_TRANS_DDI_FUNC_CTL_C, D_ALL, NULL, NULL);
	MMIO_DH(_TRANS_DDI_FUNC_CTL_EDP, D_ALL, NULL, NULL);

	MMIO_D(_TRANSA_MSA_MISC, D_ALL);
	MMIO_D(_TRANSB_MSA_MISC, D_ALL);
	MMIO_D(_TRANSC_MSA_MISC, D_ALL);
	MMIO_D(_TRANS_EDP_MSA_MISC, D_ALL);

	MMIO_DH(FORCEWAKE, D_ALL, NULL, NULL);
	MMIO_D(FORCEWAKE_ACK, D_ALL);
	MMIO_D(GEN6_GT_CORE_STATUS, D_ALL);
	MMIO_D(GEN6_GT_THREAD_STATUS_REG, D_ALL);
	MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
	MMIO_DH(FORCEWAKE_ACK_HSW, D_BDW, NULL, NULL);
	MMIO_D(ECOBUS, D_ALL);
	MMIO_DH(GEN6_RC_CONTROL, D_ALL, NULL, NULL);
	MMIO_DH(GEN6_RC_STATE, D_ALL, NULL, NULL);
	MMIO_D(GEN6_RPNSWREQ, D_ALL);
	MMIO_D(GEN6_RC_VIDEO_FREQ, D_ALL);
	MMIO_D(GEN6_RP_DOWN_TIMEOUT, D_ALL);
	MMIO_D(GEN6_RP_INTERRUPT_LIMITS, D_ALL);
	MMIO_D(GEN6_RPSTAT1, D_ALL);
	MMIO_D(GEN6_RP_CONTROL, D_ALL);
	MMIO_D(GEN6_RP_UP_THRESHOLD, D_ALL);
	MMIO_D(GEN6_RP_DOWN_THRESHOLD, D_ALL);
	MMIO_D(GEN6_RP_CUR_UP_EI, D_ALL);
	MMIO_D(GEN6_RP_CUR_UP, D_ALL);
	MMIO_D(GEN6_RP_PREV_UP, D_ALL);
	MMIO_D(GEN6_RP_CUR_DOWN_EI, D_ALL);
	MMIO_D(GEN6_RP_CUR_DOWN, D_ALL);
	MMIO_D(GEN6_RP_PREV_DOWN, D_ALL);
	MMIO_D(GEN6_RP_UP_EI, D_ALL);
	MMIO_D(GEN6_RP_DOWN_EI, D_ALL);
	MMIO_D(GEN6_RP_IDLE_HYSTERSIS, D_ALL);
	MMIO_D(GEN6_RC1_WAKE_RATE_LIMIT, D_ALL);
	MMIO_D(GEN6_RC6_WAKE_RATE_LIMIT, D_ALL);
	MMIO_D(GEN6_RC6pp_WAKE_RATE_LIMIT, D_ALL);
	MMIO_D(GEN6_RC_EVALUATION_INTERVAL, D_ALL);
	MMIO_D(GEN6_RC_IDLE_HYSTERSIS, D_ALL);
	MMIO_D(GEN6_RC_SLEEP, D_ALL);
	MMIO_D(GEN6_RC1e_THRESHOLD, D_ALL);
	MMIO_D(GEN6_RC6_THRESHOLD, D_ALL);
	MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL);
	MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL);
	MMIO_D(GEN6_PMINTRMSK, D_ALL);
	/*
	 * Use an arbitrary power well controlled by the PWR_WELL_CTL
	 * register.
	 */
	MMIO_DH(HSW_PWR_WELL_CTL_BIOS(HSW_DISP_PW_GLOBAL), D_BDW, NULL,
		power_well_ctl_mmio_write);
	MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(HSW_DISP_PW_GLOBAL), D_BDW, NULL,
		power_well_ctl_mmio_write);
	MMIO_DH(HSW_PWR_WELL_CTL_KVMR, D_BDW, NULL, power_well_ctl_mmio_write);
	MMIO_DH(HSW_PWR_WELL_CTL_DEBUG(HSW_DISP_PW_GLOBAL), D_BDW, NULL,
		power_well_ctl_mmio_write);
	MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write);
	MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write);

	MMIO_D(RSTDBYCTL, D_ALL);

	MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write);
	MMIO_F(FENCE_REG_GEN6_LO(0), 0x80, 0, 0, 0, D_ALL, fence_mmio_read, fence_mmio_write);
	MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write);

	MMIO_D(TILECTL, D_ALL);

	MMIO_D(GEN6_UCGCTL1, D_ALL);
	MMIO_D(GEN6_UCGCTL2, D_ALL);

	MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL);

	MMIO_D(GEN6_PCODE_DATA, D_ALL);
	MMIO_D(0x13812c, D_ALL);
	MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
	MMIO_D(HSW_EDRAM_CAP, D_ALL);
	MMIO_D(HSW_IDICR, D_ALL);
	MMIO_DH(GFX_FLSH_CNTL_GEN6, D_ALL, NULL, NULL);

	MMIO_D(0x3c, D_ALL);
	MMIO_D(0x860, D_ALL);
	MMIO_D(ECOSKPD, D_ALL);
	MMIO_D(0x121d0, D_ALL);
	MMIO_D(GEN6_BLITTER_ECOSKPD, D_ALL);
	MMIO_D(0x41d0, D_ALL);
	MMIO_D(GAC_ECO_BITS, D_ALL);
	MMIO_D(0x6200, D_ALL);
	MMIO_D(0x6204, D_ALL);
	MMIO_D(0x6208, D_ALL);
	MMIO_D(0x7118, D_ALL);
	MMIO_D(0x7180, D_ALL);
	MMIO_D(0x7408, D_ALL);
	MMIO_D(0x7c00, D_ALL);
	MMIO_DH(GEN6_MBCTL, D_ALL, NULL, mbctl_write);
	MMIO_D(0x911c, D_ALL);
	MMIO_D(0x9120, D_ALL);
	MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL);

	MMIO_D(GAB_CTL, D_ALL);
	MMIO_D(0x48800, D_ALL);
	MMIO_D(0xce044, D_ALL);
	MMIO_D(0xe6500, D_ALL);
	MMIO_D(0xe6504, D_ALL);
	MMIO_D(0xe6600, D_ALL);
	MMIO_D(0xe6604, D_ALL);
	MMIO_D(0xe6700, D_ALL);
	MMIO_D(0xe6704, D_ALL);
	MMIO_D(0xe6800, D_ALL);
	MMIO_D(0xe6804, D_ALL);
	MMIO_D(PCH_GMBUS4, D_ALL);
	MMIO_D(PCH_GMBUS5, D_ALL);

	MMIO_D(0x902c, D_ALL);
	MMIO_D(0xec008, D_ALL);
	MMIO_D(0xec00c, D_ALL);
	MMIO_D(0xec008 + 0x18, D_ALL);
	MMIO_D(0xec00c + 0x18, D_ALL);
	MMIO_D(0xec008 + 0x18 * 2, D_ALL);
	MMIO_D(0xec00c + 0x18 * 2, D_ALL);
	MMIO_D(0xec008 + 0x18 * 3, D_ALL);
	MMIO_D(0xec00c + 0x18 * 3, D_ALL);
	MMIO_D(0xec408, D_ALL);
	MMIO_D(0xec40c, D_ALL);
	MMIO_D(0xec408 + 0x18, D_ALL);
	MMIO_D(0xec40c + 0x18, D_ALL);
	MMIO_D(0xec408 + 0x18 * 2, D_ALL);
	MMIO_D(0xec40c + 0x18 * 2, D_ALL);
	MMIO_D(0xec408 + 0x18 * 3, D_ALL);
	MMIO_D(0xec40c + 0x18 * 3, D_ALL);
	MMIO_D(0xfc810, D_ALL);
	MMIO_D(0xfc81c, D_ALL);
	MMIO_D(0xfc828, D_ALL);
	MMIO_D(0xfc834, D_ALL);
	MMIO_D(0xfcc00, D_ALL);
	MMIO_D(0xfcc0c, D_ALL);
	MMIO_D(0xfcc18, D_ALL);
	MMIO_D(0xfcc24, D_ALL);
	MMIO_D(0xfd000, D_ALL);
	MMIO_D(0xfd00c, D_ALL);
	MMIO_D(0xfd018, D_ALL);
	MMIO_D(0xfd024, D_ALL);
	MMIO_D(0xfd034, D_ALL);

	MMIO_DH(FPGA_DBG, D_ALL, NULL, fpga_dbg_mmio_write);
	MMIO_D(0x2054, D_ALL);
	MMIO_D(0x12054, D_ALL);
	MMIO_D(0x22054, D_ALL);
	MMIO_D(0x1a054, D_ALL);

	MMIO_D(0x44070, D_ALL);
	MMIO_DFH(0x215c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x2178, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x12178, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL);

	MMIO_F(0x2290, 8, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
	MMIO_D(0x2b00, D_BDW_PLUS);
	MMIO_D(0x2360, D_BDW_PLUS);
	MMIO_F(0x5200, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(0x5240, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(0x5280, 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);

	MMIO_DFH(0x1c17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x1c178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(BCS_SWCTRL, D_ALL, F_CMD_ACCESS, NULL, NULL);

	MMIO_F(HS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(DS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(IA_VERTICES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(IA_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(VS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(GS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(GS_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(CL_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(CL_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(PS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(PS_DEPTH_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);

	MMIO_DH(0x4260, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
	MMIO_DH(0x4264, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
	MMIO_DH(0x4268, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
	MMIO_DH(0x426c, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
	MMIO_DH(0x4270, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
	MMIO_DFH(0x4094, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);

	MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_RING_GM_RDR(RING_BBADDR, D_ALL, NULL, NULL);
	MMIO_DFH(0x2220, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x12220, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x22220, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_RING_DFH(RING_SYNC_1, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_RING_DFH(RING_SYNC_0, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x22178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x1a178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x1a17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x2217c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	return 0;
}
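
/* Registers introduced with Broadwell (D_BDW / D_BDW_PLUS). */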
static int init_broadwell_mmio_info(struct intel_gvt *gvt)
{
	struct drm_i915_private *dev_priv = gvt->dev_priv;
	int ret;

	MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
	MMIO_D(GEN8_GT_ISR(0), D_BDW_PLUS);

	MMIO_DH(GEN8_GT_IMR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_GT_IER(1), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_GT_IIR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
	MMIO_D(GEN8_GT_ISR(1), D_BDW_PLUS);

	MMIO_DH(GEN8_GT_IMR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_GT_IER(2), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_GT_IIR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
	MMIO_D(GEN8_GT_ISR(2), D_BDW_PLUS);

	MMIO_DH(GEN8_GT_IMR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_GT_IER(3), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_GT_IIR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
	MMIO_D(GEN8_GT_ISR(3), D_BDW_PLUS);

	MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_A), D_BDW_PLUS, NULL,
		intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_DE_PIPE_IER(PIPE_A), D_BDW_PLUS, NULL,
		intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_A), D_BDW_PLUS, NULL,
		intel_vgpu_reg_iir_handler);
	MMIO_D(GEN8_DE_PIPE_ISR(PIPE_A), D_BDW_PLUS);

	MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_B), D_BDW_PLUS, NULL,
		intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_DE_PIPE_IER(PIPE_B), D_BDW_PLUS, NULL,
		intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_B), D_BDW_PLUS, NULL,
		intel_vgpu_reg_iir_handler);
	MMIO_D(GEN8_DE_PIPE_ISR(PIPE_B), D_BDW_PLUS);

	MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_C), D_BDW_PLUS, NULL,
		intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_DE_PIPE_IER(PIPE_C), D_BDW_PLUS, NULL,
		intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_C), D_BDW_PLUS, NULL,
		intel_vgpu_reg_iir_handler);
	MMIO_D(GEN8_DE_PIPE_ISR(PIPE_C), D_BDW_PLUS);

	MMIO_DH(GEN8_DE_PORT_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_DE_PORT_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_DE_PORT_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
	MMIO_D(GEN8_DE_PORT_ISR, D_BDW_PLUS);

	MMIO_DH(GEN8_DE_MISC_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_DE_MISC_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_DE_MISC_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
	MMIO_D(GEN8_DE_MISC_ISR, D_BDW_PLUS);

	MMIO_DH(GEN8_PCU_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_PCU_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_PCU_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
	MMIO_D(GEN8_PCU_ISR, D_BDW_PLUS);

	MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
		intel_vgpu_reg_master_irq_handler);

	MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS,
		mmio_read_from_hw, NULL);

#define RING_REG(base) (base + 0xd0)
	MMIO_RING_F(RING_REG, 4, F_RO, 0,
		~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
		ring_reset_ctl_write);
#undef RING_REG

#define RING_REG(base) (base + 0x230)
	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write);
#undef RING_REG

#define RING_REG(base) (base + 0x234)
	MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS,
		NULL, NULL);
#undef RING_REG

#define RING_REG(base) (base + 0x244)
	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
#undef RING_REG

#define RING_REG(base) (base + 0x370)
	MMIO_RING_F(RING_REG, 48, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
#undef RING_REG

#define RING_REG(base) (base + 0x3a0)
	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
#undef RING_REG

	MMIO_D(PIPEMISC(PIPE_A), D_BDW_PLUS);
	MMIO_D(PIPEMISC(PIPE_B), D_BDW_PLUS);
	MMIO_D(PIPEMISC(PIPE_C), D_BDW_PLUS);
	MMIO_D(0x1c1d0, D_BDW_PLUS);
	MMIO_D(GEN6_MBCUNIT_SNPCR, D_BDW_PLUS);
	MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS);
	MMIO_D(0x1c054, D_BDW_PLUS);
	MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);
	MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS);
	MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS);
	MMIO_D(GAMTARBMODE, D_BDW_PLUS);

#define RING_REG(base) (base + 0x270)
	MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
#undef RING_REG

	MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);

	MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);

	MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW_PLUS);
	MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW_PLUS);
	MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS);

	MMIO_D(WM_MISC, D_BDW);
	MMIO_D(BDW_EDP_PSR_BASE, D_BDW);

	MMIO_D(0x66c00, D_BDW_PLUS);
	MMIO_D(0x66c04, D_BDW_PLUS);

	MMIO_D(HSW_GTT_CACHE_EN, D_BDW_PLUS);

	MMIO_D(GEN8_EU_DISABLE0, D_BDW_PLUS);
	MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS);
	MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS);

	MMIO_D(0xfdc, D_BDW_PLUS);
	MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
		NULL, NULL);
	MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
		NULL, NULL);
	MMIO_DFH(GEN8_UCGCTL6, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);

	MMIO_DFH(0xb1f0, D_BDW, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0xb1c0, D_BDW, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0xb100, D_BDW, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0xb10c, D_BDW, F_CMD_ACCESS, NULL, NULL);
	MMIO_D(0xb110, D_BDW);

	MMIO_F(0x24d0, 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS,
		NULL, force_nonpriv_write);

	MMIO_D(0x44484, D_BDW_PLUS);
	MMIO_D(0x4448c, D_BDW_PLUS);

	MMIO_DFH(0x83a4, D_BDW, F_CMD_ACCESS, NULL, NULL);
	MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS);

	MMIO_DFH(0x8430, D_BDW, F_CMD_ACCESS, NULL, NULL);

	MMIO_D(0x110000, D_BDW_PLUS);

	MMIO_D(0x48400, D_BDW_PLUS);

	MMIO_D(0x6e570, D_BDW_PLUS);
	MMIO_D(0x65f10, D_BDW_PLUS);

	MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
		skl_misc_ctl_write);
	MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);

	MMIO_DFH(0x2248, D_BDW, F_CMD_ACCESS, NULL, NULL);

	MMIO_DFH(0xe220, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0xe230, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0xe240, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0xe260, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0xe270, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0xe280, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0xe2a0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0xe2b0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0xe2c0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	return 0;
}
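
/* Registers specific to Skylake and Kabylake (D_SKL / D_KBL / D_SKL_PLUS). */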
static int init_skl_mmio_info(struct intel_gvt *gvt)
{
	struct drm_i915_private *dev_priv = gvt->dev_priv;
	int ret;

	MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
	MMIO_DH(FORCEWAKE_ACK_RENDER_GEN9, D_SKL_PLUS, NULL, NULL);
	MMIO_DH(FORCEWAKE_BLITTER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
	MMIO_DH(FORCEWAKE_ACK_BLITTER_GEN9, D_SKL_PLUS, NULL, NULL);
	MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
	MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);

	MMIO_F(_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
		dp_aux_ch_ctl_mmio_write);
	MMIO_F(_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
		dp_aux_ch_ctl_mmio_write);
	MMIO_F(_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
		dp_aux_ch_ctl_mmio_write);

	/*
	 * Use an arbitrary power well controlled by the PWR_WELL_CTL
	 * register.
	 */
	MMIO_D(HSW_PWR_WELL_CTL_BIOS(SKL_DISP_PW_MISC_IO), D_SKL_PLUS);
	MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL,
		skl_power_well_ctl_write);

	MMIO_D(0xa210, D_SKL_PLUS);
	MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
	MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
	MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, skl_misc_ctl_write);
	MMIO_DH(0x42080, D_SKL_PLUS, NULL, skl_misc_ctl_write);
	MMIO_D(0x45504, D_SKL_PLUS);
	MMIO_D(0x45520, D_SKL_PLUS);
	MMIO_D(0x46000, D_SKL_PLUS);
	MMIO_DH(0x46010, D_SKL | D_KBL, NULL, skl_lcpll_write);
	MMIO_DH(0x46014, D_SKL | D_KBL, NULL, skl_lcpll_write);
	MMIO_D(0x6c040, D_SKL | D_KBL);
	MMIO_D(0x6c048, D_SKL | D_KBL);
	MMIO_D(0x6c050, D_SKL | D_KBL);
	MMIO_D(0x6c044, D_SKL | D_KBL);
	MMIO_D(0x6c04c, D_SKL | D_KBL);
	MMIO_D(0x6c054, D_SKL | D_KBL);
	MMIO_D(0x6c058, D_SKL | D_KBL);
	MMIO_D(0x6c05c, D_SKL | D_KBL);
	MMIO_DH(0x6c060, D_SKL | D_KBL, dpll_status_read, NULL);

	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);

	MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);

	MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);

	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL_PLUS, NULL, NULL);

	MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
	MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
	MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);

	MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
	MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
	MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);

	MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
	MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
	MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);

	MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
	MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
	MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);

	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(_REG_701C0(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_REG_701C0(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_REG_701C0(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_REG_701C0(PIPE_A, 4), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(_REG_701C0(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_REG_701C0(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_REG_701C0(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_REG_701C0(PIPE_B, 4), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(_REG_701C0(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_REG_701C0(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_REG_701C0(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_REG_701C0(PIPE_C, 4), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(_REG_701C4(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_REG_701C4(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_REG_701C4(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_REG_701C4(PIPE_A, 4), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(_REG_701C4(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_REG_701C4(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_REG_701C4(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_REG_701C4(PIPE_B, 4), D_SKL_PLUS, NULL, NULL);

	MMIO_DH(_REG_701C4(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_REG_701C4(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_REG_701C4(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_REG_701C4(PIPE_C, 4), D_SKL_PLUS, NULL, NULL);

	MMIO_D(0x70380, D_SKL_PLUS);
	MMIO_D(0x71380, D_SKL_PLUS);
	MMIO_D(0x72380, D_SKL_PLUS);
	MMIO_D(0x7039c, D_SKL_PLUS);

	MMIO_D(0x8f074, D_SKL | D_KBL);
	MMIO_D(0x8f004, D_SKL | D_KBL);
	MMIO_D(0x8f034, D_SKL | D_KBL);

	MMIO_D(0xb11c, D_SKL | D_KBL);

	MMIO_D(0x51000, D_SKL | D_KBL);
	MMIO_D(0x6c00c, D_SKL_PLUS);

	MMIO_F(0xc800, 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
	MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);

	MMIO_D(0xd08, D_SKL_PLUS);
	MMIO_DFH(0x20e0, D_SKL_PLUS, F_MODE_MASK, NULL, NULL);
	MMIO_DFH(0x20ec, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);

	/* TRTT */
	MMIO_DFH(0x4de0, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x4de4, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x4de8, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x4dec, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x4df0, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(0x4df4, D_SKL | D_KBL, F_CMD_ACCESS, NULL, gen9_trtte_write);
	MMIO_DH(0x4dfc, D_SKL | D_KBL, NULL, gen9_trtt_chicken_write);

	MMIO_D(0x45008, D_SKL | D_KBL);

	MMIO_D(0x46430, D_SKL | D_KBL);

	MMIO_D(0x46520, D_SKL | D_KBL);

	MMIO_D(0xc403c, D_SKL | D_KBL);
	MMIO_D(0xb004, D_SKL_PLUS);
	MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);

	MMIO_D(0x65900, D_SKL_PLUS);
	MMIO_D(0x1082c0, D_SKL | D_KBL);
	MMIO_D(0x4068, D_SKL | D_KBL);
	MMIO_D(0x67054, D_SKL | D_KBL);
	MMIO_D(0x6e560, D_SKL | D_KBL);
	MMIO_D(0x6e554, D_SKL | D_KBL);
	MMIO_D(0x2b20, D_SKL | D_KBL);
	MMIO_D(0x65f00, D_SKL | D_KBL);
	MMIO_D(0x65f08, D_SKL | D_KBL);
	MMIO_D(0x320f0, D_SKL | D_KBL);

	MMIO_D(0x70034, D_SKL_PLUS);
	MMIO_D(0x71034, D_SKL_PLUS);
	MMIO_D(0x72034, D_SKL_PLUS);

	MMIO_D(_PLANE_KEYVAL_1(PIPE_A), D_SKL_PLUS);
	MMIO_D(_PLANE_KEYVAL_1(PIPE_B), D_SKL_PLUS);
	MMIO_D(_PLANE_KEYVAL_1(PIPE_C), D_SKL_PLUS);
	MMIO_D(_PLANE_KEYMSK_1(PIPE_A), D_SKL_PLUS);
	MMIO_D(_PLANE_KEYMSK_1(PIPE_B), D_SKL_PLUS);
	MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL_PLUS);

	MMIO_D(0x44500, D_SKL_PLUS);
	MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL | D_KBL, F_MODE_MASK | F_CMD_ACCESS,
		NULL, NULL);

	MMIO_D(0x4ab8, D_KBL);
	MMIO_D(0x2248, D_SKL_PLUS | D_KBL);

	return 0;
}
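
/*
 * Linear scan of the special MMIO blocks: return the first block for
 * this device type whose [offset, offset + size) range contains the
 * given offset, or NULL if none matches.
 */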
static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
		unsigned int offset)
{
	unsigned long device = intel_gvt_get_device_type(gvt);
	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
	int num = gvt->mmio.num_mmio_block;
	int i;

	for (i = 0; i < num; i++, block++) {
		if (!(device & block->device))
			continue;
		if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) &&
		    offset < INTEL_GVT_MMIO_OFFSET(block->offset) + block->size)
			return block;
	}
	return NULL;
}

/**
 * intel_gvt_clean_mmio_info - clean up MMIO information table for GVT device
 * @gvt: GVT device
 *
 * This function is called at the driver unloading stage, to clean up the
 * MMIO information table of the GVT device.
 *
 */
void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
{
	struct hlist_node *tmp;
	struct intel_gvt_mmio_info *e;
	int i;

	hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node)
		kfree(e);

	vfree(gvt->mmio.mmio_attribute);
	gvt->mmio.mmio_attribute = NULL;
}
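
/*
 * Each tracked register has an individually allocated intel_gvt_mmio_info
 * entry in gvt->mmio.mmio_info_table, which is why the teardown above walks
 * the hash table with the _safe iterator while freeing entries.
 */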

/* Special MMIO blocks. */
static struct gvt_mmio_block mmio_blocks[] = {
	{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
	{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
	{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
		pvinfo_mmio_read, pvinfo_mmio_write},
	{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
	{D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
	{D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
};
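
/*
 * intel_vgpu_mmio_reg_rw() consults mmio_blocks[] before the per-register
 * hash table: the PVINFO page gets dedicated handlers, while entries with
 * NULL handlers (e.g. the MCHBAR mirror and the legacy palettes) route the
 * whole range to the default vreg read/write.
 */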

/**
 * intel_gvt_setup_mmio_info - set up MMIO information table for GVT device
 * @gvt: GVT device
 *
 * This function is called at the initialization stage, to set up the MMIO
 * information table of the GVT device.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
{
	struct intel_gvt_device_info *info = &gvt->device_info;
	struct drm_i915_private *dev_priv = gvt->dev_priv;
	int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute);
	int ret;

	gvt->mmio.mmio_attribute = vzalloc(size);
	if (!gvt->mmio.mmio_attribute)
		return -ENOMEM;

	ret = init_generic_mmio_info(gvt);
	if (ret)
		goto err;

	if (IS_BROADWELL(dev_priv)) {
		ret = init_broadwell_mmio_info(gvt);
		if (ret)
			goto err;
	} else if (IS_SKYLAKE(dev_priv)
		|| IS_KABYLAKE(dev_priv)) {
		ret = init_broadwell_mmio_info(gvt);
		if (ret)
			goto err;
		ret = init_skl_mmio_info(gvt);
		if (ret)
			goto err;
	}

	gvt->mmio.mmio_block = mmio_blocks;
	gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);

	gvt_dbg_mmio("traced %u virtual mmio registers\n",
		     gvt->mmio.num_tracked_mmio);
	return 0;
err:
	intel_gvt_clean_mmio_info(gvt);
	return ret;
}
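
/*
 * Note that Skylake/Kabylake first reuse the whole Broadwell register table
 * and then layer the gen9-specific entries from init_skl_mmio_info() on
 * top; any failure unwinds through intel_gvt_clean_mmio_info().
 */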

/**
 * intel_vgpu_default_mmio_read - default MMIO read handler
 * @vgpu: a vGPU
 * @offset: access offset
 * @p_data: data return buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	read_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

/**
 * intel_vgpu_default_mmio_write - default MMIO write handler
 * @vgpu: a vGPU
 * @offset: access offset
 * @p_data: write data buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

/**
 * intel_gvt_in_force_nonpriv_whitelist - check whether an MMIO register is
 * in the force-nonpriv whitelist
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if the register is in the force-nonpriv whitelist;
 * False if it is outside.
 */
bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
					  unsigned int offset)
{
	return in_whitelist(offset);
}

/**
 * intel_vgpu_mmio_reg_rw - emulate tracked mmio registers
 * @vgpu: a vGPU
 * @offset: register offset
 * @pdata: data buffer
 * @bytes: data length
 * @is_read: true for a read access, false for a write
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
		void *pdata, unsigned int bytes, bool is_read)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_mmio_info *mmio_info;
	struct gvt_mmio_block *mmio_block;
	gvt_mmio_func func;
	int ret;

	if (WARN_ON(bytes > 8))
		return -EINVAL;

	/*
	 * Handle special MMIO blocks.
	 */
	mmio_block = find_mmio_block(gvt, offset);
	if (mmio_block) {
		func = is_read ? mmio_block->read : mmio_block->write;
		if (func)
			return func(vgpu, offset, pdata, bytes);
		goto default_rw;
	}

	/*
	 * Normal tracked MMIOs.
	 */
	mmio_info = find_mmio_info(gvt, offset);
	if (!mmio_info) {
		if (!vgpu->mmio.disable_warn_untrack)
			gvt_vgpu_err("untracked MMIO %08x len %d\n",
				     offset, bytes);
		goto default_rw;
	}

	if (is_read)
		return mmio_info->read(vgpu, offset, pdata, bytes);
	else {
		u64 ro_mask = mmio_info->ro_mask;
		u32 old_vreg = 0, old_sreg = 0;
		u64 data = 0;

		if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
			old_vreg = vgpu_vreg(vgpu, offset);
			old_sreg = vgpu_sreg(vgpu, offset);
		}

		if (likely(!ro_mask))
			ret = mmio_info->write(vgpu, offset, pdata, bytes);
		else if (!~ro_mask) {
			gvt_vgpu_err("try to write RO reg %x\n", offset);
			return 0;
		} else {
			/* keep the RO bits in the virtual register */
			memcpy(&data, pdata, bytes);
			data &= ~ro_mask;
			data |= vgpu_vreg(vgpu, offset) & ro_mask;
			ret = mmio_info->write(vgpu, offset, &data, bytes);
		}
		/* higher 16 bits of mode ctl regs are mask bits for change */
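		/*
		 * E.g. writing 0x00080008 sets bit 3 and writing 0x00080000
		 * clears it; bits whose mask bit was not set in the written
		 * value keep their old contents after the merge below.
		 */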
		if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
			u32 mask = vgpu_vreg(vgpu, offset) >> 16;

			vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
				| (vgpu_vreg(vgpu, offset) & mask);
			vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
				| (vgpu_sreg(vgpu, offset) & mask);
		}
	}

	return ret;

default_rw:
	return is_read ?
		intel_vgpu_default_mmio_read(vgpu, offset, pdata, bytes) :
		intel_vgpu_default_mmio_write(vgpu, offset, pdata, bytes);
}
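
/*
 * Minimal caller sketch (hypothetical; the real entry points that trap
 * guest MMIO accesses live elsewhere in GVT and also handle offset
 * alignment), for a 4-byte read:
 *
 *	u32 val = 0;
 *	int ret = intel_vgpu_mmio_reg_rw(vgpu, offset, &val, 4, true);
 *
 * Dispatch order is: special MMIO blocks, then tracked per-register
 * handlers (with read-only and mode-mask handling on writes), then the
 * default vreg read/write for untracked offsets.
 */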