/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Eddie Dong <eddie.dong@intel.com>
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Pei Zhang <pei.zhang@intel.com>
 *    Niu Bing <bing.niu@intel.com>
 *    Ping Gao <ping.a.gao@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */
#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"

/* XXX FIXME i915 has changed PP_XXX definition */
#define PCH_PP_STATUS  _MMIO(0xc7200)
#define PCH_PP_CONTROL _MMIO(0xc7204)
#define PCH_PP_ON_DELAYS _MMIO(0xc7208)
#define PCH_PP_OFF_DELAYS _MMIO(0xc720c)
#define PCH_PP_DIVISOR _MMIO(0xc7210)

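/**
 * intel_gvt_get_device_type - map the host GPU to a D_* device-type flag
 * @gvt: a GVT device
 *
 * The returned flag is matched against the device mask of each tracked MMIO
 * entry, so handlers can be registered for specific platforms only.
 *
 * Returns:
 * A D_* platform flag, or 0 if the platform is not supported by GVT-g.
 */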
unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
{
	if (IS_BROADWELL(gvt->dev_priv))
		return D_BDW;
	else if (IS_SKYLAKE(gvt->dev_priv))
		return D_SKL;
	else if (IS_KABYLAKE(gvt->dev_priv))
		return D_KBL;
	else if (IS_BROXTON(gvt->dev_priv))
		return D_BXT;

	return 0;
}

bool intel_gvt_match_device(struct intel_gvt *gvt,
		unsigned long device)
{
	return intel_gvt_get_device_type(gvt) & device;
}

static void read_vreg(struct intel_vgpu *vgpu, unsigned int offset,
	void *p_data, unsigned int bytes)
{
	memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
}

static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
	void *p_data, unsigned int bytes)
{
	memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
}

static struct intel_gvt_mmio_info *find_mmio_info(struct intel_gvt *gvt,
						  unsigned int offset)
{
	struct intel_gvt_mmio_info *e;

	hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
		if (e->offset == offset)
			return e;
	}
	return NULL;
}

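/*
 * Register a tracked MMIO range with GVT-g. Each 4-byte register in
 * [offset, offset + size) gets its own intel_gvt_mmio_info entry carrying
 * the read-only mask, attribute flags and read/write handlers; registers
 * without explicit handlers fall back to the default vreg accessors.
 */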
static int new_mmio_info(struct intel_gvt *gvt,
		u32 offset, u8 flags, u32 size,
		u32 addr_mask, u32 ro_mask, u32 device,
		gvt_mmio_func read, gvt_mmio_func write)
{
	struct intel_gvt_mmio_info *info, *p;
	u32 start, end, i;

	if (!intel_gvt_match_device(gvt, device))
		return 0;

	if (WARN_ON(!IS_ALIGNED(offset, 4)))
		return -EINVAL;

	start = offset;
	end = offset + size;

	for (i = start; i < end; i += 4) {
		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		info->offset = i;
		p = find_mmio_info(gvt, info->offset);
		if (p) {
			WARN(1, "dup mmio definition offset %x\n",
				info->offset);
			kfree(info);

			/* Return -EEXIST here to make GVT-g load fail,
			 * so duplicated MMIO definitions are caught as
			 * early as possible.
			 */
			return -EEXIST;
		}

		info->ro_mask = ro_mask;
		info->device = device;
		info->read = read ? read : intel_vgpu_default_mmio_read;
		info->write = write ? write : intel_vgpu_default_mmio_write;
		gvt->mmio.mmio_attribute[info->offset / 4] = flags;
		INIT_HLIST_NODE(&info->node);
		hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
		gvt->mmio.num_tracked_mmio++;
	}
	return 0;
}

/**
 * intel_gvt_render_mmio_to_ring_id - convert a mmio offset into ring id
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * Ring ID on success, negative error code if failed.
 */
int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
		unsigned int offset)
{
	enum intel_engine_id id;
	struct intel_engine_cs *engine;

	offset &= ~GENMASK(11, 0);
	for_each_engine(engine, gvt->dev_priv, id) {
		if (engine->mmio_base == offset)
			return id;
	}
	return -ENODEV;
}

#define offset_to_fence_num(offset) \
	((offset - i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0))) >> 3)

#define fence_num_to_offset(num) \
	(num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0)))

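/**
 * enter_failsafe_mode - mark a vGPU as untrusted after a fatal guest error
 * @vgpu: a vGPU
 * @reason: a GVT_FAILSAFE_* code describing why failsafe is entered
 *
 * Once vgpu->failsafe is set, the MMIO emulation path stops applying the
 * normal handlers for this vGPU, so a broken or unsupported guest driver
 * cannot disturb the host any further.
 */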
void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
{
	switch (reason) {
	case GVT_FAILSAFE_UNSUPPORTED_GUEST:
		pr_err("Detected your guest driver doesn't support GVT-g.\n");
		break;
	case GVT_FAILSAFE_INSUFFICIENT_RESOURCE:
		pr_err("Not enough graphics resources for the guest.\n");
		break;
	case GVT_FAILSAFE_GUEST_ERR:
		pr_err("GVT internal error for the guest.\n");
		break;
	default:
		break;
	}
	pr_err("Now vgpu %d will enter failsafe mode.\n", vgpu->id);
	vgpu->failsafe = true;
}

static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
		unsigned int fence_num, void *p_data, unsigned int bytes)
{
	unsigned int max_fence = vgpu_fence_sz(vgpu);

	if (fence_num >= max_fence) {
		gvt_vgpu_err("access oob fence reg %d/%d\n",
			     fence_num, max_fence);

		/* When a guest accesses OOB fence regs without first
		 * accessing pv_info, we treat the guest as not supporting
		 * GVT-g and let the vGPU enter failsafe mode.
		 */
		if (!vgpu->pv_notified)
			enter_failsafe_mode(vgpu,
					GVT_FAILSAFE_UNSUPPORTED_GUEST);

		memset(p_data, 0, bytes);
		return -EINVAL;
	}
	return 0;
}

static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD;

	if (INTEL_GEN(vgpu->gvt->dev_priv) <= 10) {
		if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD)
			gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id);
		else if (!ips)
			gvt_dbg_core("vgpu%d: ips disabled\n", vgpu->id);
		else {
			/* All engines must be enabled together for vGPU,
			 * since we don't know which engine the ppgtt will
			 * bind to when shadowing.
			 */
			gvt_vgpu_err("Unsupported IPS setting %x, cannot enable 64K gtt.\n",
				     ips);
			return -EINVAL;
		}
	}

	write_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
		void *p_data, unsigned int bytes)
{
	int ret;

	ret = sanitize_fence_mmio_access(vgpu, offset_to_fence_num(off),
			p_data, bytes);
	if (ret)
		return ret;
	read_vreg(vgpu, off, p_data, bytes);
	return 0;
}

static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
		void *p_data, unsigned int bytes)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	unsigned int fence_num = offset_to_fence_num(off);
	int ret;

	ret = sanitize_fence_mmio_access(vgpu, fence_num, p_data, bytes);
	if (ret)
		return ret;
	write_vreg(vgpu, off, p_data, bytes);

	mmio_hw_access_pre(dev_priv);
	intel_vgpu_write_fence(vgpu, fence_num,
			vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
	mmio_hw_access_post(dev_priv);
	return 0;
}

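/*
 * Forcewake registers follow the i915 "masked register" convention: the
 * upper 16 bits of a write select which of the lower 16 bits take effect.
 * CALC_MODE_MASK_REG() merges a write into the current value accordingly.
 * For example, writing 0x00010001 sets bit 0 and writing 0x00010000 clears
 * bit 0, while every unmasked bit keeps its old value.
 */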
#define CALC_MODE_MASK_REG(old, new) \
	(((new) & GENMASK(31, 16)) \
	 | ((((old) & GENMASK(15, 0)) & ~((new) >> 16)) \
	 | ((new) & ((new) >> 16))))

static int mul_force_wake_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 old, new;
	u32 ack_reg_offset;

	old = vgpu_vreg(vgpu, offset);
	new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);

	if (IS_SKYLAKE(vgpu->gvt->dev_priv)
		|| IS_KABYLAKE(vgpu->gvt->dev_priv)
		|| IS_BROXTON(vgpu->gvt->dev_priv)) {
		switch (offset) {
		case FORCEWAKE_RENDER_GEN9_REG:
			ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
			break;
		case FORCEWAKE_BLITTER_GEN9_REG:
			ack_reg_offset = FORCEWAKE_ACK_BLITTER_GEN9_REG;
			break;
		case FORCEWAKE_MEDIA_GEN9_REG:
			ack_reg_offset = FORCEWAKE_ACK_MEDIA_GEN9_REG;
			break;
		default:
			/* should not hit here */
			gvt_vgpu_err("invalid forcewake offset 0x%x\n", offset);
			return -EINVAL;
		}
	} else {
		ack_reg_offset = FORCEWAKE_ACK_HSW_REG;
	}

	vgpu_vreg(vgpu, offset) = new;
	vgpu_vreg(vgpu, ack_reg_offset) = (new & GENMASK(15, 0));
	return 0;
}

static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	unsigned int engine_mask = 0;
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (data & GEN6_GRDOM_FULL) {
		gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
		engine_mask = ALL_ENGINES;
	} else {
		if (data & GEN6_GRDOM_RENDER) {
			gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
			engine_mask |= (1 << RCS);
		}
		if (data & GEN6_GRDOM_MEDIA) {
			gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
			engine_mask |= (1 << VCS);
		}
		if (data & GEN6_GRDOM_BLT) {
			gvt_dbg_mmio("vgpu%d: request BCS reset\n", vgpu->id);
			engine_mask |= (1 << BCS);
		}
		if (data & GEN6_GRDOM_VECS) {
			gvt_dbg_mmio("vgpu%d: request VECS reset\n", vgpu->id);
			engine_mask |= (1 << VECS);
		}
		if (data & GEN8_GRDOM_MEDIA2) {
			gvt_dbg_mmio("vgpu%d: request VCS2 reset\n", vgpu->id);
			if (HAS_BSD2(vgpu->gvt->dev_priv))
				engine_mask |= (1 << VCS2);
		}
	}

	/* vgpu_lock is already held by the emulate mmio r/w path */
	intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);

	/* sw will wait for the device to ack the reset request */
	vgpu_vreg(vgpu, offset) = 0;

	return 0;
}

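/* GMBUS (I2C) accesses are forwarded to GVT's virtual I2C/EDID emulation. */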
static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	return intel_gvt_i2c_handle_gmbus_read(vgpu, offset, p_data, bytes);
}

static int gmbus_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	return intel_gvt_i2c_handle_gmbus_write(vgpu, offset, p_data, bytes);
}

static int pch_pp_control_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & PANEL_POWER_ON) {
		vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_ON;
		vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_SEQUENCE_STATE_ON_IDLE;
		vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_SEQUENCE_POWER_DOWN;
		vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_CYCLE_DELAY_ACTIVE;
	} else
		vgpu_vreg_t(vgpu, PCH_PP_STATUS) &=
			~(PP_ON | PP_SEQUENCE_POWER_DOWN
					| PP_CYCLE_DELAY_ACTIVE);
	return 0;
}

static int transconf_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & TRANS_ENABLE)
		vgpu_vreg(vgpu, offset) |= TRANS_STATE_ENABLE;
	else
		vgpu_vreg(vgpu, offset) &= ~TRANS_STATE_ENABLE;
	return 0;
}

static int lcpll_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & LCPLL_PLL_DISABLE)
		vgpu_vreg(vgpu, offset) &= ~LCPLL_PLL_LOCK;
	else
		vgpu_vreg(vgpu, offset) |= LCPLL_PLL_LOCK;

	if (vgpu_vreg(vgpu, offset) & LCPLL_CD_SOURCE_FCLK)
		vgpu_vreg(vgpu, offset) |= LCPLL_CD_SOURCE_FCLK_DONE;
	else
		vgpu_vreg(vgpu, offset) &= ~LCPLL_CD_SOURCE_FCLK_DONE;

	return 0;
}

static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	switch (offset) {
	case 0xe651c:
	case 0xe661c:
	case 0xe671c:
	case 0xe681c:
		vgpu_vreg(vgpu, offset) = 1 << 17;
		break;
	case 0xe6c04:
		vgpu_vreg(vgpu, offset) = 0x3;
		break;
	case 0xe6e1c:
		vgpu_vreg(vgpu, offset) = 0x2f << 16;
		break;
	default:
		return -EINVAL;
	}

	read_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (data & PIPECONF_ENABLE)
		vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE;
	else
		vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE;

	/* vgpu_lock is already held by the emulate mmio r/w path */
	mutex_unlock(&vgpu->vgpu_lock);
	intel_gvt_check_vblank_emulation(vgpu->gvt);
	mutex_lock(&vgpu->vgpu_lock);
	return 0;
}

/* sorted in ascending order */
static i915_reg_t force_nonpriv_white_list[] = {
	GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec)
	GEN9_CTX_PREEMPT_REG, //_MMIO(0x2248)
	GEN8_CS_CHICKEN1, //_MMIO(0x2580)
	_MMIO(0x2690),
	_MMIO(0x2694),
	_MMIO(0x2698),
	_MMIO(0x4de0),
	_MMIO(0x4de4),
	_MMIO(0x4dfc),
	GEN7_COMMON_SLICE_CHICKEN1, //_MMIO(0x7010)
	_MMIO(0x7014),
	HDC_CHICKEN0, //_MMIO(0x7300)
	GEN8_HDC_CHICKEN1, //_MMIO(0x7304)
	_MMIO(0x7700),
	_MMIO(0x7704),
	_MMIO(0x7708),
	_MMIO(0x770c),
	_MMIO(0xb110),
	GEN8_L3SQCREG4, //_MMIO(0xb118)
	_MMIO(0xe100),
	_MMIO(0xe18c),
	_MMIO(0xe48c),
	_MMIO(0xe5f4),
};

/* a simple bsearch */
static inline bool in_whitelist(unsigned int reg)
{
	int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list);
	i915_reg_t *array = force_nonpriv_white_list;

	while (left < right) {
		int mid = (left + right)/2;

		if (reg > array[mid].reg)
			left = mid + 1;
		else if (reg < array[mid].reg)
			right = mid;
		else
			return true;
	}
	return false;
}

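/*
 * FORCE_NONPRIV lets a context whitelist privileged registers for user
 * access. Only writes whose target register is in the whitelist above (or
 * is the ring's NOPID register) are forwarded to the vreg; anything else
 * is rejected and logged, so a guest cannot open up arbitrary registers.
 */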
static int force_nonpriv_write(struct intel_vgpu *vgpu,
	unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 reg_nonpriv = *(u32 *)p_data;
	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
	u32 ring_base;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	int ret = -EINVAL;

	if ((bytes != 4) || ((offset & (bytes - 1)) != 0) || ring_id < 0) {
		gvt_err("vgpu(%d) ring %d Invalid FORCE_NONPRIV offset %x(%dB)\n",
			vgpu->id, ring_id, offset, bytes);
		return ret;
	}

	ring_base = dev_priv->engine[ring_id]->mmio_base;

	if (in_whitelist(reg_nonpriv) ||
		reg_nonpriv == i915_mmio_reg_offset(RING_NOPID(ring_base))) {
		ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
			bytes);
	} else
		gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x at offset %x\n",
			vgpu->id, reg_nonpriv, offset);

	return 0;
}

static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & DDI_BUF_CTL_ENABLE) {
		vgpu_vreg(vgpu, offset) &= ~DDI_BUF_IS_IDLE;
	} else {
		vgpu_vreg(vgpu, offset) |= DDI_BUF_IS_IDLE;
		if (offset == i915_mmio_reg_offset(DDI_BUF_CTL(PORT_E)))
			vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E))
				&= ~DP_TP_STATUS_AUTOTRAIN_DONE;
	}
	return 0;
}

static int fdi_rx_iir_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	vgpu_vreg(vgpu, offset) &= ~*(u32 *)p_data;
	return 0;
}

#define FDI_LINK_TRAIN_PATTERN1 0
#define FDI_LINK_TRAIN_PATTERN2 1

static int fdi_auto_training_started(struct intel_vgpu *vgpu)
{
	u32 ddi_buf_ctl = vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_E));
	u32 rx_ctl = vgpu_vreg(vgpu, _FDI_RXA_CTL);
	u32 tx_ctl = vgpu_vreg_t(vgpu, DP_TP_CTL(PORT_E));

	if ((ddi_buf_ctl & DDI_BUF_CTL_ENABLE) &&
			(rx_ctl & FDI_RX_ENABLE) &&
			(rx_ctl & FDI_AUTO_TRAINING) &&
			(tx_ctl & DP_TP_CTL_ENABLE) &&
			(tx_ctl & DP_TP_CTL_FDI_AUTOTRAIN))
		return 1;
	else
		return 0;
}

static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
		enum pipe pipe, unsigned int train_pattern)
{
	i915_reg_t fdi_rx_imr, fdi_tx_ctl, fdi_rx_ctl;
	unsigned int fdi_rx_check_bits, fdi_tx_check_bits;
	unsigned int fdi_rx_train_bits, fdi_tx_train_bits;
	unsigned int fdi_iir_check_bits;

	fdi_rx_imr = FDI_RX_IMR(pipe);
	fdi_tx_ctl = FDI_TX_CTL(pipe);
	fdi_rx_ctl = FDI_RX_CTL(pipe);

	if (train_pattern == FDI_LINK_TRAIN_PATTERN1) {
		fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_1_CPT;
		fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_1;
		fdi_iir_check_bits = FDI_RX_BIT_LOCK;
	} else if (train_pattern == FDI_LINK_TRAIN_PATTERN2) {
		fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_2_CPT;
		fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
		fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK;
	} else {
		gvt_vgpu_err("Invalid train pattern %d\n", train_pattern);
		return -EINVAL;
	}

	fdi_rx_check_bits = FDI_RX_ENABLE | fdi_rx_train_bits;
	fdi_tx_check_bits = FDI_TX_ENABLE | fdi_tx_train_bits;

	/* If imr bit has been masked */
	if (vgpu_vreg_t(vgpu, fdi_rx_imr) & fdi_iir_check_bits)
		return 0;

	if (((vgpu_vreg_t(vgpu, fdi_tx_ctl) & fdi_tx_check_bits)
			== fdi_tx_check_bits)
			&& ((vgpu_vreg_t(vgpu, fdi_rx_ctl) & fdi_rx_check_bits)
			== fdi_rx_check_bits))
		return 1;
	else
		return 0;
}

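/*
 * Display register banks (FDI, DP_TP, DSPSURF, ...) repeat at a fixed
 * stride per pipe/port. calc_index() recovers the instance index from an
 * offset: stride = next - start, index = (offset - start) / stride. For
 * example, with _FDI_RXA_CTL as "start" and _FDI_RXB_CTL as "next", an
 * offset equal to _FDI_RXB_CTL yields index 1 (pipe B).
 */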
#define INVALID_INDEX (~0U)

static unsigned int calc_index(unsigned int offset, unsigned int start,
	unsigned int next, unsigned int end, i915_reg_t i915_end)
{
	unsigned int range = next - start;

	if (!end)
		end = i915_mmio_reg_offset(i915_end);
	if (offset < start || offset > end)
		return INVALID_INDEX;
	offset -= start;
	return offset / range;
}

#define FDI_RX_CTL_TO_PIPE(offset) \
	calc_index(offset, _FDI_RXA_CTL, _FDI_RXB_CTL, 0, FDI_RX_CTL(PIPE_C))

#define FDI_TX_CTL_TO_PIPE(offset) \
	calc_index(offset, _FDI_TXA_CTL, _FDI_TXB_CTL, 0, FDI_TX_CTL(PIPE_C))

#define FDI_RX_IMR_TO_PIPE(offset) \
	calc_index(offset, _FDI_RXA_IMR, _FDI_RXB_IMR, 0, FDI_RX_IMR(PIPE_C))

static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	i915_reg_t fdi_rx_iir;
	unsigned int index;
	int ret;

	if (FDI_RX_CTL_TO_PIPE(offset) != INVALID_INDEX)
		index = FDI_RX_CTL_TO_PIPE(offset);
	else if (FDI_TX_CTL_TO_PIPE(offset) != INVALID_INDEX)
		index = FDI_TX_CTL_TO_PIPE(offset);
	else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
		index = FDI_RX_IMR_TO_PIPE(offset);
	else {
		gvt_vgpu_err("Unsupported register %x\n", offset);
		return -EINVAL;
	}

	write_vreg(vgpu, offset, p_data, bytes);

	fdi_rx_iir = FDI_RX_IIR(index);

	ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN1);
	if (ret < 0)
		return ret;
	if (ret)
		vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_BIT_LOCK;

	ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN2);
	if (ret < 0)
		return ret;
	if (ret)
		vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_SYMBOL_LOCK;

	if (offset == _FDI_RXA_CTL)
		if (fdi_auto_training_started(vgpu))
			vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E)) |=
				DP_TP_STATUS_AUTOTRAIN_DONE;
	return 0;
}

#define DP_TP_CTL_TO_PORT(offset) \
	calc_index(offset, _DP_TP_CTL_A, _DP_TP_CTL_B, 0, DP_TP_CTL(PORT_E))

static int dp_tp_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	i915_reg_t status_reg;
	unsigned int index;
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);

	index = DP_TP_CTL_TO_PORT(offset);
	data = (vgpu_vreg(vgpu, offset) & GENMASK(10, 8)) >> 8;
	if (data == 0x2) {
		status_reg = DP_TP_STATUS(index);
		vgpu_vreg_t(vgpu, status_reg) |= (1 << 25);
	}
	return 0;
}

static int dp_tp_status_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 reg_val;
	u32 sticky_mask;

	reg_val = *((u32 *)p_data);
	sticky_mask = GENMASK(27, 26) | (1 << 24);

	vgpu_vreg(vgpu, offset) = (reg_val & ~sticky_mask) |
		(vgpu_vreg(vgpu, offset) & sticky_mask);
	vgpu_vreg(vgpu, offset) &= ~(reg_val & sticky_mask);
	return 0;
}

static int pch_adpa_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (data & ADPA_CRT_HOTPLUG_FORCE_TRIGGER)
		vgpu_vreg(vgpu, offset) &= ~ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
	return 0;
}

static int south_chicken2_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (data & FDI_MPHY_IOSFSB_RESET_CTL)
		vgpu_vreg(vgpu, offset) |= FDI_MPHY_IOSFSB_RESET_STATUS;
	else
		vgpu_vreg(vgpu, offset) &= ~FDI_MPHY_IOSFSB_RESET_STATUS;
	return 0;
}

#define DSPSURF_TO_PIPE(offset) \
	calc_index(offset, _DSPASURF, _DSPBSURF, 0, DSPSURF(PIPE_C))

static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	unsigned int index = DSPSURF_TO_PIPE(offset);
	i915_reg_t surflive_reg = DSPSURFLIVE(index);
	int flip_event[] = {
		[PIPE_A] = PRIMARY_A_FLIP_DONE,
		[PIPE_B] = PRIMARY_B_FLIP_DONE,
		[PIPE_C] = PRIMARY_C_FLIP_DONE,
	};

	write_vreg(vgpu, offset, p_data, bytes);
	vgpu_vreg_t(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);

	set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
	return 0;
}

#define SPRSURF_TO_PIPE(offset) \
	calc_index(offset, _SPRA_SURF, _SPRB_SURF, 0, SPRSURF(PIPE_C))

static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	unsigned int index = SPRSURF_TO_PIPE(offset);
	i915_reg_t surflive_reg = SPRSURFLIVE(index);
	int flip_event[] = {
		[PIPE_A] = SPRITE_A_FLIP_DONE,
		[PIPE_B] = SPRITE_B_FLIP_DONE,
		[PIPE_C] = SPRITE_C_FLIP_DONE,
	};

	write_vreg(vgpu, offset, p_data, bytes);
	vgpu_vreg_t(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);

	set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
	return 0;
}

static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
		unsigned int reg)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	enum intel_gvt_event_type event;

	if (reg == _DPA_AUX_CH_CTL)
		event = AUX_CHANNEL_A;
	else if (reg == _PCH_DPB_AUX_CH_CTL || reg == _DPB_AUX_CH_CTL)
		event = AUX_CHANNEL_B;
	else if (reg == _PCH_DPC_AUX_CH_CTL || reg == _DPC_AUX_CH_CTL)
		event = AUX_CHANNEL_C;
	else if (reg == _PCH_DPD_AUX_CH_CTL || reg == _DPD_AUX_CH_CTL)
		event = AUX_CHANNEL_D;
	else {
		WARN_ON(true);
		return -EINVAL;
	}

	intel_vgpu_trigger_virtual_event(vgpu, event);
	return 0;
}

static int dp_aux_ch_ctl_trans_done(struct intel_vgpu *vgpu, u32 value,
		unsigned int reg, int len, bool data_valid)
{
	/* mark transaction done */
	value |= DP_AUX_CH_CTL_DONE;
	value &= ~DP_AUX_CH_CTL_SEND_BUSY;
	value &= ~DP_AUX_CH_CTL_RECEIVE_ERROR;

	if (data_valid)
		value &= ~DP_AUX_CH_CTL_TIME_OUT_ERROR;
	else
		value |= DP_AUX_CH_CTL_TIME_OUT_ERROR;

	/* message size */
	value &= ~(0xf << 20);
	value |= (len << 20);
	vgpu_vreg(vgpu, reg) = value;

	if (value & DP_AUX_CH_CTL_INTERRUPT)
		return trigger_aux_channel_interrupt(vgpu, reg);
	return 0;
}

static void dp_aux_ch_ctl_link_training(struct intel_vgpu_dpcd_data *dpcd,
		uint8_t t)
{
	if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == DPCD_TRAINING_PATTERN_1) {
		/* training pattern 1 for CR */
		/* set LANE0_CR_DONE, LANE1_CR_DONE */
		dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_CR_DONE;
		/* set LANE2_CR_DONE, LANE3_CR_DONE */
		dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_CR_DONE;
	} else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
			DPCD_TRAINING_PATTERN_2) {
		/* training pattern 2 for EQ */
		/* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane0_1 */
		dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_EQ_DONE;
		dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_SYMBOL_LOCKED;
		/* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane2_3 */
		dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_EQ_DONE;
		dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_SYMBOL_LOCKED;
		/* set INTERLANE_ALIGN_DONE */
		dpcd->data[DPCD_LANE_ALIGN_STATUS_UPDATED] |=
			DPCD_INTERLANE_ALIGN_DONE;
	} else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
			DPCD_LINK_TRAINING_DISABLED) {
		/* finish link training */
		/* set sink status as synchronized */
		dpcd->data[DPCD_SINK_STATUS] = DPCD_SINK_IN_SYNC;
	}
}

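/*
 * An AUX transaction is kicked off by writing the DP_AUX_CH_CTL register;
 * the request header lives in the first data register (CTL + 4), packed as
 * [31:24] command, [23:8] DPCD address, [7:0] length - 1. Native reads and
 * writes are completed against the virtual DPCD below; everything else is
 * treated as an I2C-over-AUX transaction.
 */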
#define _REG_HSW_DP_AUX_CH_CTL(dp) \
	((dp) ? (_PCH_DPB_AUX_CH_CTL + ((dp)-1)*0x100) : 0x64010)

#define _REG_SKL_DP_AUX_CH_CTL(dp) (0x64010 + (dp) * 0x100)

#define OFFSET_TO_DP_AUX_PORT(offset) (((offset) & 0xF00) >> 8)

#define dpy_is_valid_port(port) \
		(((port) >= PORT_A) && ((port) < I915_MAX_PORTS))

static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	struct intel_vgpu_display *display = &vgpu->display;
	int msg, addr, ctrl, op, len;
	int port_index = OFFSET_TO_DP_AUX_PORT(offset);
	struct intel_vgpu_dpcd_data *dpcd = NULL;
	struct intel_vgpu_port *port = NULL;
	u32 data;

	if (!dpy_is_valid_port(port_index)) {
		gvt_vgpu_err("Unsupported DP port access!\n");
		return 0;
	}

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if ((IS_SKYLAKE(vgpu->gvt->dev_priv)
		|| IS_KABYLAKE(vgpu->gvt->dev_priv)
		|| IS_BROXTON(vgpu->gvt->dev_priv))
		&& offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
		/* SKL DPB/C/D aux ctl register changed */
		return 0;
	} else if (IS_BROADWELL(vgpu->gvt->dev_priv) &&
		   offset != _REG_HSW_DP_AUX_CH_CTL(port_index)) {
		/* write to the data registers */
		return 0;
	}

	if (!(data & DP_AUX_CH_CTL_SEND_BUSY)) {
		/* just want to clear the sticky bits */
		vgpu_vreg(vgpu, offset) = 0;
		return 0;
	}

	port = &display->ports[port_index];
	dpcd = port->dpcd;

	/* read out message from DATA1 register */
	msg = vgpu_vreg(vgpu, offset + 4);
	addr = (msg >> 8) & 0xffff;
	ctrl = (msg >> 24) & 0xff;
	len = msg & 0xff;
	op = ctrl >> 4;

	if (op == GVT_AUX_NATIVE_WRITE) {
		int t;
		uint8_t buf[16];

		if ((addr + len + 1) >= DPCD_SIZE) {
			/*
			 * Write request exceeds what we support.
			 * DPCD spec: When a Source Device is writing a DPCD
			 * address not supported by the Sink Device, the Sink
			 * Device shall reply with AUX NACK and "M" equal to
			 * zero.
			 */

			/* NAK the write */
			vgpu_vreg(vgpu, offset + 4) = AUX_NATIVE_REPLY_NAK;
			dp_aux_ch_ctl_trans_done(vgpu, data, offset, 2, true);
			return 0;
		}

		/*
		 * Write request format: header (command + address + size)
		 * occupies 4 bytes, followed by (len + 1) bytes of data.
		 * See details at intel_dp_aux_transfer().
		 */
		if ((len + 1 + 4) > AUX_BURST_SIZE) {
			gvt_vgpu_err("dp_aux_header: len %d is too large\n", len);
			return -EINVAL;
		}

		/* unpack data from vreg to buf */
		for (t = 0; t < 4; t++) {
			u32 r = vgpu_vreg(vgpu, offset + 8 + t * 4);

			buf[t * 4] = (r >> 24) & 0xff;
			buf[t * 4 + 1] = (r >> 16) & 0xff;
			buf[t * 4 + 2] = (r >> 8) & 0xff;
			buf[t * 4 + 3] = r & 0xff;
		}

		/* write to virtual DPCD */
		if (dpcd && dpcd->data_valid) {
			for (t = 0; t <= len; t++) {
				int p = addr + t;

				dpcd->data[p] = buf[t];
				/* check for link training */
				if (p == DPCD_TRAINING_PATTERN_SET)
					dp_aux_ch_ctl_link_training(dpcd,
							buf[t]);
			}
		}

		/* ACK the write */
		vgpu_vreg(vgpu, offset + 4) = 0;
		dp_aux_ch_ctl_trans_done(vgpu, data, offset, 1,
				dpcd && dpcd->data_valid);
		return 0;
	}

	if (op == GVT_AUX_NATIVE_READ) {
		int idx, i, ret = 0;

		if ((addr + len + 1) >= DPCD_SIZE) {
			/*
			 * Read request exceeds what we support.
			 * DPCD spec: A Sink Device receiving a Native AUX CH
			 * read request for an unsupported DPCD address must
			 * reply with an AUX ACK and read data set equal to
			 * zero instead of replying with AUX NACK.
			 */

			/* ACK the READ */
			vgpu_vreg(vgpu, offset + 4) = 0;
			vgpu_vreg(vgpu, offset + 8) = 0;
			vgpu_vreg(vgpu, offset + 12) = 0;
			vgpu_vreg(vgpu, offset + 16) = 0;
			vgpu_vreg(vgpu, offset + 20) = 0;

			dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
					true);
			return 0;
		}

		for (idx = 1; idx <= 5; idx++) {
			/* clear the data registers */
			vgpu_vreg(vgpu, offset + 4 * idx) = 0;
		}

		/*
		 * Read reply format: ACK (1 byte) plus (len + 1) bytes of data.
		 */
		if ((len + 2) > AUX_BURST_SIZE) {
			gvt_vgpu_err("dp_aux_header: len %d is too large\n", len);
			return -EINVAL;
		}

		/* read from virtual DPCD to vreg */
		/* first 4 bytes: [ACK][addr][addr+1][addr+2] */
		if (dpcd && dpcd->data_valid) {
			for (i = 1; i <= (len + 1); i++) {
				int t;

				t = dpcd->data[addr + i - 1];
				t <<= (24 - 8 * (i % 4));
				ret |= t;

				if ((i % 4 == 3) || (i == (len + 1))) {
					vgpu_vreg(vgpu, offset +
							(i / 4 + 1) * 4) = ret;
					ret = 0;
				}
			}
		}

		dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
				dpcd && dpcd->data_valid);
		return 0;
	}

	/* i2c transaction starts */
	intel_gvt_i2c_handle_aux_ch_write(vgpu, port_index, offset, p_data);

	if (data & DP_AUX_CH_CTL_INTERRUPT)
		trigger_aux_channel_interrupt(vgpu, offset);
	return 0;
}

static int mbctl_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	*(u32 *)p_data &= (~GEN6_MBCTL_ENABLE_BOOT_FETCH);
	write_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	bool vga_disable;

	write_vreg(vgpu, offset, p_data, bytes);
	vga_disable = vgpu_vreg(vgpu, offset) & VGA_DISP_DISABLE;

	gvt_dbg_core("vgpu%d: %s VGA mode\n", vgpu->id,
			vga_disable ? "Disable" : "Enable");
	return 0;
}

static u32 read_virtual_sbi_register(struct intel_vgpu *vgpu,
		unsigned int sbi_offset)
{
	struct intel_vgpu_display *display = &vgpu->display;
	int num = display->sbi.number;
	int i;

	for (i = 0; i < num; ++i)
		if (display->sbi.registers[i].offset == sbi_offset)
			break;

	if (i == num)
		return 0;

	return display->sbi.registers[i].value;
}

static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
		unsigned int offset, u32 value)
{
	struct intel_vgpu_display *display = &vgpu->display;
	int num = display->sbi.number;
	int i;

	for (i = 0; i < num; ++i) {
		if (display->sbi.registers[i].offset == offset)
			break;
	}

	if (i == num) {
		if (num == SBI_REG_MAX) {
			gvt_vgpu_err("SBI register cache is full\n");
			return;
		}
		display->sbi.number++;
	}

	display->sbi.registers[i].offset = offset;
	display->sbi.registers[i].value = value;
}

static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
				SBI_OPCODE_SHIFT) == SBI_CMD_CRRD) {
		unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) &
				SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
		vgpu_vreg(vgpu, offset) = read_virtual_sbi_register(vgpu,
				sbi_offset);
	}
	read_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	data &= ~(SBI_STAT_MASK << SBI_STAT_SHIFT);
	data |= SBI_READY;

	data &= ~(SBI_RESPONSE_MASK << SBI_RESPONSE_SHIFT);
	data |= SBI_RESPONSE_SUCCESS;

	vgpu_vreg(vgpu, offset) = data;

	if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
				SBI_OPCODE_SHIFT) == SBI_CMD_CRWR) {
		unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) &
				SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;

		write_virtual_sbi_register(vgpu, sbi_offset,
					   vgpu_vreg_t(vgpu, SBI_DATA));
	}

	return 0;
}

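/*
 * The PVINFO page (struct vgt_if at VGT_PVINFO_PAGE) is the shared MMIO
 * window through which a paravirtualized guest driver talks to GVT-g:
 * reads expose capabilities and resource layout, writes deliver g2v
 * notifications. Touching it also tells us the guest driver is GVT-aware.
 */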
#define _vgtif_reg(x) \
	(VGT_PVINFO_PAGE + offsetof(struct vgt_if, x))

static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	bool invalid_read = false;

	read_vreg(vgpu, offset, p_data, bytes);

	switch (offset) {
	case _vgtif_reg(magic) ... _vgtif_reg(vgt_id):
		if (offset + bytes > _vgtif_reg(vgt_id) + 4)
			invalid_read = true;
		break;
	case _vgtif_reg(avail_rs.mappable_gmadr.base) ...
		_vgtif_reg(avail_rs.fence_num):
		if (offset + bytes >
			_vgtif_reg(avail_rs.fence_num) + 4)
			invalid_read = true;
		break;
	case 0x78010:	/* vgt_caps */
	case 0x7881c:
		break;
	default:
		invalid_read = true;
		break;
	}
	if (invalid_read)
		gvt_vgpu_err("invalid pvinfo read: [%x:%x] = %x\n",
				offset, bytes, *(u32 *)p_data);
	vgpu->pv_notified = true;
	return 0;
}

static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
{
	intel_gvt_gtt_type_t root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
	struct intel_vgpu_mm *mm;
	u64 *pdps;

	pdps = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));

	switch (notification) {
	case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
		root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
		/* fall through */
	case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
		mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
		return PTR_ERR_OR_ZERO(mm);
	case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY:
	case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY:
		return intel_vgpu_put_ppgtt_mm(vgpu, pdps);
	case VGT_G2V_EXECLIST_CONTEXT_CREATE:
	case VGT_G2V_EXECLIST_CONTEXT_DESTROY:
	case 1:	/* Remove this in guest driver. */
		break;
	default:
		gvt_vgpu_err("Invalid PV notification %d\n", notification);
	}
	return 0;
}

static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
	char *env[3] = {NULL, NULL, NULL};
	char vmid_str[20];
	char display_ready_str[20];

	snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d", ready);
	env[0] = display_ready_str;

	snprintf(vmid_str, 20, "VMID=%d", vgpu->id);
	env[1] = vmid_str;

	return kobject_uevent_env(kobj, KOBJ_ADD, env);
}

static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 data;
	int ret;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	switch (offset) {
	case _vgtif_reg(display_ready):
		send_display_ready_uevent(vgpu, data ? 1 : 0);
		break;
	case _vgtif_reg(g2v_notify):
		ret = handle_g2v_notification(vgpu, data);
		break;
	/* add xhot and yhot to handled list to avoid error log */
	case _vgtif_reg(cursor_x_hot):
	case _vgtif_reg(cursor_y_hot):
	case _vgtif_reg(pdp[0].lo):
	case _vgtif_reg(pdp[0].hi):
	case _vgtif_reg(pdp[1].lo):
	case _vgtif_reg(pdp[1].hi):
	case _vgtif_reg(pdp[2].lo):
	case _vgtif_reg(pdp[2].hi):
	case _vgtif_reg(pdp[3].lo):
	case _vgtif_reg(pdp[3].hi):
	case _vgtif_reg(execlist_context_descriptor_lo):
	case _vgtif_reg(execlist_context_descriptor_hi):
		break;
	case _vgtif_reg(rsv5[0])..._vgtif_reg(rsv5[3]):
		enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
		break;
	default:
		gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n",
				offset, bytes, data);
		break;
	}
	return 0;
}

static int pf_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 val = *(u32 *)p_data;

	if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL ||
	   offset == _PS_1B_CTRL || offset == _PS_2B_CTRL ||
	   offset == _PS_1C_CTRL) && (val & PS_PLANE_SEL_MASK) != 0) {
		WARN_ONCE(true, "VM(%d): guest is trying to scale a plane\n",
			  vgpu->id);
		return 0;
	}

	return intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);
}

static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) &
	    HSW_PWR_WELL_CTL_REQ(HSW_PW_CTL_IDX_GLOBAL))
		vgpu_vreg(vgpu, offset) |=
			HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL);
	else
		vgpu_vreg(vgpu, offset) &=
			~HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL);
	return 0;
}

static int gen9_dbuf_ctl_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & DBUF_POWER_REQUEST)
		vgpu_vreg(vgpu, offset) |= DBUF_POWER_STATE;
	else
		vgpu_vreg(vgpu, offset) &= ~DBUF_POWER_STATE;

	return 0;
}

static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);

	if (vgpu_vreg(vgpu, offset) & FPGA_DBG_RM_NOCLAIM)
		vgpu_vreg(vgpu, offset) &= ~FPGA_DBG_RM_NOCLAIM;
	return 0;
}

static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 mode;

	write_vreg(vgpu, offset, p_data, bytes);
	mode = vgpu_vreg(vgpu, offset);

	if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
		WARN_ONCE(1, "VM(%d): iGVT-g doesn't support GuC\n",
			  vgpu->id);
		return 0;
	}

	return 0;
}

  1134. static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
  1135. void *p_data, unsigned int bytes)
  1136. {
  1137. struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
  1138. u32 trtte = *(u32 *)p_data;
  1139. if ((trtte & 1) && (trtte & (1 << 1)) == 0) {
  1140. WARN(1, "VM(%d): Use physical address for TRTT!\n",
  1141. vgpu->id);
  1142. return -EINVAL;
  1143. }
  1144. write_vreg(vgpu, offset, p_data, bytes);
  1145. /* TRTTE is not per-context */
  1146. mmio_hw_access_pre(dev_priv);
  1147. I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
  1148. mmio_hw_access_post(dev_priv);
  1149. return 0;
  1150. }
  1151. static int gen9_trtt_chicken_write(struct intel_vgpu *vgpu, unsigned int offset,
  1152. void *p_data, unsigned int bytes)
  1153. {
  1154. struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
  1155. u32 val = *(u32 *)p_data;
  1156. if (val & 1) {
  1157. /* unblock hw logic */
  1158. mmio_hw_access_pre(dev_priv);
  1159. I915_WRITE(_MMIO(offset), val);
  1160. mmio_hw_access_post(dev_priv);
  1161. }
  1162. write_vreg(vgpu, offset, p_data, bytes);
  1163. return 0;
  1164. }
static int dpll_status_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 v = 0;

	if (vgpu_vreg(vgpu, 0x46010) & (1 << 31))
		v |= (1 << 0);

	if (vgpu_vreg(vgpu, 0x46014) & (1 << 31))
		v |= (1 << 8);

	if (vgpu_vreg(vgpu, 0x46040) & (1 << 31))
		v |= (1 << 16);

	if (vgpu_vreg(vgpu, 0x46060) & (1 << 31))
		v |= (1 << 24);

	vgpu_vreg(vgpu, offset) = v;

	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
}

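/*
 * Pcode mailbox emulation: a few commands are served from canned data and
 * every transaction is reported as complete and successful, since no real
 * pcode request is issued on the guest's behalf.
 */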
static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 value = *(u32 *)p_data;
	u32 cmd = value & 0xff;
	u32 *data0 = &vgpu_vreg_t(vgpu, GEN6_PCODE_DATA);

	switch (cmd) {
	case GEN9_PCODE_READ_MEM_LATENCY:
		if (IS_SKYLAKE(vgpu->gvt->dev_priv)
			 || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
			/*
			 * "Read memory latency" command on gen9.
			 * Below memory latency values are read
			 * from the Skylake platform.
			 */
			if (!*data0)
				*data0 = 0x1e1a1100;
			else
				*data0 = 0x61514b3d;
		} else if (IS_BROXTON(vgpu->gvt->dev_priv)) {
			/*
			 * "Read memory latency" command on gen9.
			 * Below memory latency values are read
			 * from a Broxton MRB.
			 */
			if (!*data0)
				*data0 = 0x16080707;
			else
				*data0 = 0x16161616;
		}
		break;
	case SKL_PCODE_CDCLK_CONTROL:
		if (IS_SKYLAKE(vgpu->gvt->dev_priv)
			 || IS_KABYLAKE(vgpu->gvt->dev_priv))
			*data0 = SKL_CDCLK_READY_FOR_CHANGE;
		break;
	case GEN6_PCODE_READ_RC6VIDS:
		*data0 |= 0x1;
		break;
	}

	gvt_dbg_core("VM(%d) write %x to mailbox, return data0 %x\n",
		     vgpu->id, value, *data0);
	/*
	 * PCODE_READY clear means ready for pcode read/write,
	 * PCODE_ERROR_MASK clear means no error happened. In GVT-g we
	 * always emulate as pcode read/write success and ready for access
	 * anytime, since we don't touch real physical registers here.
	 */
	value &= ~(GEN6_PCODE_READY | GEN6_PCODE_ERROR_MASK);
	return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
}

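/*
 * Record the guest's hardware status page address per ring, after
 * validating that it falls inside the vGPU's GGTT range.
 */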
static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 value = *(u32 *)p_data;
	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);

	if (!intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
		gvt_vgpu_err("write invalid HWSP address, reg:0x%x, value:0x%x\n",
			     offset, value);
		return -EINVAL;
	}
	/*
	 * Need to emulate all HWSP register writes to ensure the host can
	 * update the VM CSB status correctly. The registers listed here
	 * cover BDW, SKL and other platforms with the same HWSP registers.
	 */
	if (unlikely(ring_id < 0 || ring_id >= I915_NUM_ENGINES)) {
		gvt_vgpu_err("access unknown hardware status page register:0x%x\n",
			     offset);
		return -EINVAL;
	}
	vgpu->hws_pga[ring_id] = value;
	gvt_dbg_mmio("VM(%d) write: 0x%x to HWSP: 0x%x\n",
		     vgpu->id, value, offset);

	return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
}

static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 v = *(u32 *)p_data;

	if (IS_BROXTON(vgpu->gvt->dev_priv))
		v &= (1 << 31) | (1 << 29);
	else
		v &= (1 << 31) | (1 << 29) | (1 << 9) |
			(1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
	v |= (v >> 1);

	return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
}

static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 v = *(u32 *)p_data;

	/* other bits are MBZ. */
	v &= (1 << 31) | (1 << 30);
	if (v & (1 << 31))
		v |= (1 << 30);
	else
		v &= ~(1 << 30);

	vgpu_vreg(vgpu, offset) = v;

	return 0;
}

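/*
 * The two Broxton PLL handlers below report a PLL as locked as soon as
 * the guest enables it; no physical PLL is programmed.
 */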
static int bxt_de_pll_enable_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 v = *(u32 *)p_data;

	if (v & BXT_DE_PLL_PLL_ENABLE)
		v |= BXT_DE_PLL_LOCK;

	vgpu_vreg(vgpu, offset) = v;

	return 0;
}

static int bxt_port_pll_enable_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 v = *(u32 *)p_data;

	if (v & PORT_PLL_ENABLE)
		v |= PORT_PLL_LOCK;

	vgpu_vreg(vgpu, offset) = v;

	return 0;
}

static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 v = *(u32 *)p_data;
	u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0;

	switch (offset) {
	case _PHY_CTL_FAMILY_EDP:
		vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
		break;
	case _PHY_CTL_FAMILY_DDI:
		vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
		vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
		break;
	}

	vgpu_vreg(vgpu, offset) = v;

	return 0;
}

static int bxt_port_tx_dw3_read(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 v = vgpu_vreg(vgpu, offset);

	v &= ~UNIQUE_TRANGE_EN_METHOD;

	vgpu_vreg(vgpu, offset) = v;

	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
}

static int bxt_pcs_dw12_grp_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 v = *(u32 *)p_data;

	if (offset == _PORT_PCS_DW12_GRP_A || offset == _PORT_PCS_DW12_GRP_B) {
		vgpu_vreg(vgpu, offset - 0x600) = v;
		vgpu_vreg(vgpu, offset - 0x800) = v;
	} else {
		vgpu_vreg(vgpu, offset - 0x400) = v;
		vgpu_vreg(vgpu, offset - 0x600) = v;
	}

	vgpu_vreg(vgpu, offset) = v;

	return 0;
}

static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 v = *(u32 *)p_data;

	if (v & BIT(0)) {
		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
			~PHY_RESERVED;
		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |=
			PHY_POWER_GOOD;
	}

	if (v & BIT(1)) {
		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
			~PHY_RESERVED;
		vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |=
			PHY_POWER_GOOD;
	}

	vgpu_vreg(vgpu, offset) = v;

	return 0;
}

static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	vgpu_vreg(vgpu, offset) = 0;
	return 0;
}

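/*
 * Generic read handler that refreshes the vreg from real hardware when
 * that is safe: non-ring registers, rings currently owned by this vGPU,
 * and ring timestamps.
 */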
static int mmio_read_from_hw(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct drm_i915_private *dev_priv = gvt->dev_priv;
	int ring_id;
	u32 ring_base;

	ring_id = intel_gvt_render_mmio_to_ring_id(gvt, offset);
	/*
	 * Read the HW reg in the following cases:
	 * a. the offset isn't a ring mmio,
	 * b. the offset's ring is currently running on the HW for this vGPU,
	 * c. the offset is a ring timestamp mmio.
	 */
	if (ring_id >= 0)
		ring_base = dev_priv->engine[ring_id]->mmio_base;

	if (ring_id < 0 || vgpu == gvt->scheduler.engine_owner[ring_id] ||
	    offset == i915_mmio_reg_offset(RING_TIMESTAMP(ring_base)) ||
	    offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(ring_base))) {
		mmio_hw_access_pre(dev_priv);
		vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
		mmio_hw_access_post(dev_priv);
	}

	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
}

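/*
 * ELSP emulation: accumulate the four dwords of a guest ELSP write
 * sequence and submit the execlist once the descriptor is complete.
 */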
static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
	struct intel_vgpu_execlist *execlist;
	u32 data = *(u32 *)p_data;
	int ret = 0;

	if (WARN_ON(ring_id < 0 || ring_id >= I915_NUM_ENGINES))
		return -EINVAL;

	execlist = &vgpu->submission.execlist[ring_id];

	execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data;
	if (execlist->elsp_dwords.index == 3) {
		ret = intel_vgpu_submit_execlist(vgpu, ring_id);
		if (ret)
			gvt_vgpu_err("failed to submit workload on ring %d\n",
				     ring_id);
	}

	++execlist->elsp_dwords.index;
	execlist->elsp_dwords.index &= 0x3;
	return ret;
}

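/*
 * GFX_MODE writes select the vGPU submission path: enabling execlists
 * switches the vGPU to execlist submission ops and starts scheduling.
 */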
static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 data = *(u32 *)p_data;
	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
	bool enable_execlist;
	int ret;

	write_vreg(vgpu, offset, p_data, bytes);

	/*
	 * When PPGTT mode is enabled, check whether the guest has called
	 * pvinfo; if not, treat it as a non-GVT-g-aware guest and stop
	 * emulating its cfg space, mmio, gtt, etc.
	 */
	if (((data & _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)) ||
			(data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)))
			&& !vgpu->pv_notified) {
		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
		return 0;
	}
	if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))
			|| (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
		enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);

		gvt_dbg_core("EXECLIST %s on ring %d\n",
			     (enable_execlist ? "enabling" : "disabling"),
			     ring_id);

		if (!enable_execlist)
			return 0;

		ret = intel_vgpu_select_submission_ops(vgpu,
				ENGINE_MASK(ring_id),
				INTEL_VGPU_EXECLIST_SUBMISSION);
		if (ret)
			return ret;

		intel_vgpu_start_schedule(vgpu);
	}
	return 0;
}

static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	unsigned int id = 0;

	write_vreg(vgpu, offset, p_data, bytes);
	vgpu_vreg(vgpu, offset) = 0;

	switch (offset) {
	case 0x4260:
		id = RCS;
		break;
	case 0x4264:
		id = VCS;
		break;
	case 0x4268:
		id = VCS2;
		break;
	case 0x426c:
		id = BCS;
		break;
	case 0x4270:
		id = VECS;
		break;
	default:
		return -EINVAL;
	}
	set_bit(id, (void *)vgpu->submission.tlb_handle_pending);

	return 0;
}

static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 data;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (data & _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET))
		data |= RESET_CTL_READY_TO_RESET;
	else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
		data &= ~RESET_CTL_READY_TO_RESET;

	vgpu_vreg(vgpu, offset) = data;
	return 0;
}

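/*
 * The MMIO_* macros below populate the gvt mmio info table. The suffixes
 * roughly encode the entry: D = one 4-byte register, H = with read/write
 * handler(s), F = with flags, GM = graphics-memory address (masked),
 * RO = read-only, and the RING variants register one entry per engine.
 */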
#define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
	ret = new_mmio_info(gvt, i915_mmio_reg_offset(reg), \
		f, s, am, rm, d, r, w); \
	if (ret) \
		return ret; \
} while (0)

#define MMIO_D(reg, d) \
	MMIO_F(reg, 4, 0, 0, 0, d, NULL, NULL)

#define MMIO_DH(reg, d, r, w) \
	MMIO_F(reg, 4, 0, 0, 0, d, r, w)

#define MMIO_DFH(reg, d, f, r, w) \
	MMIO_F(reg, 4, f, 0, 0, d, r, w)

#define MMIO_GM(reg, d, r, w) \
	MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w)

#define MMIO_GM_RDR(reg, d, r, w) \
	MMIO_F(reg, 4, F_GMADR | F_CMD_ACCESS, 0xFFFFF000, 0, d, r, w)

#define MMIO_RO(reg, d, f, rm, r, w) \
	MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w)

#define MMIO_RING_F(prefix, s, f, am, rm, d, r, w) do { \
	MMIO_F(prefix(RENDER_RING_BASE), s, f, am, rm, d, r, w); \
	MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
	MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
	MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
	if (HAS_BSD2(dev_priv)) \
		MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \
} while (0)

#define MMIO_RING_D(prefix, d) \
	MMIO_RING_F(prefix, 4, 0, 0, 0, d, NULL, NULL)

#define MMIO_RING_DFH(prefix, d, f, r, w) \
	MMIO_RING_F(prefix, 4, f, 0, 0, d, r, w)

#define MMIO_RING_GM(prefix, d, r, w) \
	MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w)

#define MMIO_RING_GM_RDR(prefix, d, r, w) \
	MMIO_RING_F(prefix, 4, F_GMADR | F_CMD_ACCESS, 0xFFFF0000, 0, d, r, w)

#define MMIO_RING_RO(prefix, d, f, rm, r, w) \
	MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w)

static int init_generic_mmio_info(struct intel_gvt *gvt)
{
	struct drm_i915_private *dev_priv = gvt->dev_priv;
	int ret;

	MMIO_RING_DFH(RING_IMR, D_ALL, F_CMD_ACCESS, NULL,
		intel_vgpu_reg_imr_handler);

	MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
	MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler);
	MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);
	MMIO_D(SDEISR, D_ALL);

	MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL);

	MMIO_DH(GEN8_GAMW_ECO_DEV_RW_IA, D_BDW_PLUS, NULL,
		gamw_echo_dev_rw_ia_write);

	MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
	MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
	MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);

#define RING_REG(base) _MMIO((base) + 0x28)
	MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
#undef RING_REG

#define RING_REG(base) _MMIO((base) + 0x134)
	MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
#undef RING_REG

#define RING_REG(base) _MMIO((base) + 0x6c)
	MMIO_RING_DFH(RING_REG, D_ALL, 0, mmio_read_from_hw, NULL);
#undef RING_REG

	MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL);

	MMIO_GM_RDR(_MMIO(0x2148), D_ALL, NULL, NULL);
	MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
	MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL);
	MMIO_D(GEN7_CXT_SIZE, D_ALL);

	MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_RING_DFH(RING_HEAD, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_RING_DFH(RING_CTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, mmio_read_from_hw, NULL);
	MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL);

	/* RING MODE */
#define RING_REG(base) _MMIO((base) + 0x29c)
	MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
		ring_mode_mmio_write);
#undef RING_REG

	MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
		NULL, NULL);
	MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
		NULL, NULL);
	MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS,
		mmio_read_from_hw, NULL);
	MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS,
		mmio_read_from_hw, NULL);

	MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
		NULL, NULL);
	MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(CACHE_MODE_0, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x2124), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);

	MMIO_DFH(_MMIO(0x20dc), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x2088), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x20e4), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x2470), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
		NULL, NULL);
	MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
		NULL, NULL);
	MMIO_DFH(_MMIO(0x9030), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x20a0), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x2420), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x2430), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x2434), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x2438), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x243c), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x7018), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);

	/* display */
	MMIO_F(_MMIO(0x60220), 0x20, 0, 0, 0, D_ALL, NULL, NULL);
	MMIO_D(_MMIO(0x602a0), D_ALL);

	MMIO_D(_MMIO(0x65050), D_ALL);
	MMIO_D(_MMIO(0x650b4), D_ALL);

	MMIO_D(_MMIO(0xc4040), D_ALL);
	MMIO_D(DERRMR, D_ALL);

	MMIO_D(PIPEDSL(PIPE_A), D_ALL);
	MMIO_D(PIPEDSL(PIPE_B), D_ALL);
	MMIO_D(PIPEDSL(PIPE_C), D_ALL);
	MMIO_D(PIPEDSL(_PIPE_EDP), D_ALL);

	MMIO_DH(PIPECONF(PIPE_A), D_ALL, NULL, pipeconf_mmio_write);
	MMIO_DH(PIPECONF(PIPE_B), D_ALL, NULL, pipeconf_mmio_write);
	MMIO_DH(PIPECONF(PIPE_C), D_ALL, NULL, pipeconf_mmio_write);
	MMIO_DH(PIPECONF(_PIPE_EDP), D_ALL, NULL, pipeconf_mmio_write);

	MMIO_D(PIPESTAT(PIPE_A), D_ALL);
	MMIO_D(PIPESTAT(PIPE_B), D_ALL);
	MMIO_D(PIPESTAT(PIPE_C), D_ALL);
	MMIO_D(PIPESTAT(_PIPE_EDP), D_ALL);

	MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_A), D_ALL);
	MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_B), D_ALL);
	MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_C), D_ALL);
	MMIO_D(PIPE_FLIPCOUNT_G4X(_PIPE_EDP), D_ALL);

	MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_A), D_ALL);
	MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_B), D_ALL);
	MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_C), D_ALL);
	MMIO_D(PIPE_FRMCOUNT_G4X(_PIPE_EDP), D_ALL);

	MMIO_D(CURCNTR(PIPE_A), D_ALL);
	MMIO_D(CURCNTR(PIPE_B), D_ALL);
	MMIO_D(CURCNTR(PIPE_C), D_ALL);

	MMIO_D(CURPOS(PIPE_A), D_ALL);
	MMIO_D(CURPOS(PIPE_B), D_ALL);
	MMIO_D(CURPOS(PIPE_C), D_ALL);

	MMIO_D(CURBASE(PIPE_A), D_ALL);
	MMIO_D(CURBASE(PIPE_B), D_ALL);
	MMIO_D(CURBASE(PIPE_C), D_ALL);

	MMIO_D(CUR_FBC_CTL(PIPE_A), D_ALL);
	MMIO_D(CUR_FBC_CTL(PIPE_B), D_ALL);
	MMIO_D(CUR_FBC_CTL(PIPE_C), D_ALL);

	MMIO_D(_MMIO(0x700ac), D_ALL);
	MMIO_D(_MMIO(0x710ac), D_ALL);
	MMIO_D(_MMIO(0x720ac), D_ALL);

	MMIO_D(_MMIO(0x70090), D_ALL);
	MMIO_D(_MMIO(0x70094), D_ALL);
	MMIO_D(_MMIO(0x70098), D_ALL);
	MMIO_D(_MMIO(0x7009c), D_ALL);

	MMIO_D(DSPCNTR(PIPE_A), D_ALL);
	MMIO_D(DSPADDR(PIPE_A), D_ALL);
	MMIO_D(DSPSTRIDE(PIPE_A), D_ALL);
	MMIO_D(DSPPOS(PIPE_A), D_ALL);
	MMIO_D(DSPSIZE(PIPE_A), D_ALL);
	MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
	MMIO_D(DSPOFFSET(PIPE_A), D_ALL);
	MMIO_D(DSPSURFLIVE(PIPE_A), D_ALL);

	MMIO_D(DSPCNTR(PIPE_B), D_ALL);
	MMIO_D(DSPADDR(PIPE_B), D_ALL);
	MMIO_D(DSPSTRIDE(PIPE_B), D_ALL);
	MMIO_D(DSPPOS(PIPE_B), D_ALL);
	MMIO_D(DSPSIZE(PIPE_B), D_ALL);
	MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
	MMIO_D(DSPOFFSET(PIPE_B), D_ALL);
	MMIO_D(DSPSURFLIVE(PIPE_B), D_ALL);

	MMIO_D(DSPCNTR(PIPE_C), D_ALL);
	MMIO_D(DSPADDR(PIPE_C), D_ALL);
	MMIO_D(DSPSTRIDE(PIPE_C), D_ALL);
	MMIO_D(DSPPOS(PIPE_C), D_ALL);
	MMIO_D(DSPSIZE(PIPE_C), D_ALL);
	MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
	MMIO_D(DSPOFFSET(PIPE_C), D_ALL);
	MMIO_D(DSPSURFLIVE(PIPE_C), D_ALL);

	MMIO_D(SPRCTL(PIPE_A), D_ALL);
	MMIO_D(SPRLINOFF(PIPE_A), D_ALL);
	MMIO_D(SPRSTRIDE(PIPE_A), D_ALL);
	MMIO_D(SPRPOS(PIPE_A), D_ALL);
	MMIO_D(SPRSIZE(PIPE_A), D_ALL);
	MMIO_D(SPRKEYVAL(PIPE_A), D_ALL);
	MMIO_D(SPRKEYMSK(PIPE_A), D_ALL);
	MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write);
	MMIO_D(SPRKEYMAX(PIPE_A), D_ALL);
	MMIO_D(SPROFFSET(PIPE_A), D_ALL);
	MMIO_D(SPRSCALE(PIPE_A), D_ALL);
	MMIO_D(SPRSURFLIVE(PIPE_A), D_ALL);

	MMIO_D(SPRCTL(PIPE_B), D_ALL);
	MMIO_D(SPRLINOFF(PIPE_B), D_ALL);
	MMIO_D(SPRSTRIDE(PIPE_B), D_ALL);
	MMIO_D(SPRPOS(PIPE_B), D_ALL);
	MMIO_D(SPRSIZE(PIPE_B), D_ALL);
	MMIO_D(SPRKEYVAL(PIPE_B), D_ALL);
	MMIO_D(SPRKEYMSK(PIPE_B), D_ALL);
	MMIO_DH(SPRSURF(PIPE_B), D_ALL, NULL, spr_surf_mmio_write);
	MMIO_D(SPRKEYMAX(PIPE_B), D_ALL);
	MMIO_D(SPROFFSET(PIPE_B), D_ALL);
	MMIO_D(SPRSCALE(PIPE_B), D_ALL);
	MMIO_D(SPRSURFLIVE(PIPE_B), D_ALL);

	MMIO_D(SPRCTL(PIPE_C), D_ALL);
	MMIO_D(SPRLINOFF(PIPE_C), D_ALL);
	MMIO_D(SPRSTRIDE(PIPE_C), D_ALL);
	MMIO_D(SPRPOS(PIPE_C), D_ALL);
	MMIO_D(SPRSIZE(PIPE_C), D_ALL);
	MMIO_D(SPRKEYVAL(PIPE_C), D_ALL);
	MMIO_D(SPRKEYMSK(PIPE_C), D_ALL);
	MMIO_DH(SPRSURF(PIPE_C), D_ALL, NULL, spr_surf_mmio_write);
	MMIO_D(SPRKEYMAX(PIPE_C), D_ALL);
	MMIO_D(SPROFFSET(PIPE_C), D_ALL);
	MMIO_D(SPRSCALE(PIPE_C), D_ALL);
	MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL);

	MMIO_D(HTOTAL(TRANSCODER_A), D_ALL);
	MMIO_D(HBLANK(TRANSCODER_A), D_ALL);
	MMIO_D(HSYNC(TRANSCODER_A), D_ALL);
	MMIO_D(VTOTAL(TRANSCODER_A), D_ALL);
	MMIO_D(VBLANK(TRANSCODER_A), D_ALL);
	MMIO_D(VSYNC(TRANSCODER_A), D_ALL);
	MMIO_D(BCLRPAT(TRANSCODER_A), D_ALL);
	MMIO_D(VSYNCSHIFT(TRANSCODER_A), D_ALL);
	MMIO_D(PIPESRC(TRANSCODER_A), D_ALL);

	MMIO_D(HTOTAL(TRANSCODER_B), D_ALL);
	MMIO_D(HBLANK(TRANSCODER_B), D_ALL);
	MMIO_D(HSYNC(TRANSCODER_B), D_ALL);
	MMIO_D(VTOTAL(TRANSCODER_B), D_ALL);
	MMIO_D(VBLANK(TRANSCODER_B), D_ALL);
	MMIO_D(VSYNC(TRANSCODER_B), D_ALL);
	MMIO_D(BCLRPAT(TRANSCODER_B), D_ALL);
	MMIO_D(VSYNCSHIFT(TRANSCODER_B), D_ALL);
	MMIO_D(PIPESRC(TRANSCODER_B), D_ALL);

	MMIO_D(HTOTAL(TRANSCODER_C), D_ALL);
	MMIO_D(HBLANK(TRANSCODER_C), D_ALL);
	MMIO_D(HSYNC(TRANSCODER_C), D_ALL);
	MMIO_D(VTOTAL(TRANSCODER_C), D_ALL);
	MMIO_D(VBLANK(TRANSCODER_C), D_ALL);
	MMIO_D(VSYNC(TRANSCODER_C), D_ALL);
	MMIO_D(BCLRPAT(TRANSCODER_C), D_ALL);
	MMIO_D(VSYNCSHIFT(TRANSCODER_C), D_ALL);
	MMIO_D(PIPESRC(TRANSCODER_C), D_ALL);

	MMIO_D(HTOTAL(TRANSCODER_EDP), D_ALL);
	MMIO_D(HBLANK(TRANSCODER_EDP), D_ALL);
	MMIO_D(HSYNC(TRANSCODER_EDP), D_ALL);
	MMIO_D(VTOTAL(TRANSCODER_EDP), D_ALL);
	MMIO_D(VBLANK(TRANSCODER_EDP), D_ALL);
	MMIO_D(VSYNC(TRANSCODER_EDP), D_ALL);
	MMIO_D(BCLRPAT(TRANSCODER_EDP), D_ALL);
	MMIO_D(VSYNCSHIFT(TRANSCODER_EDP), D_ALL);

	MMIO_D(PIPE_DATA_M1(TRANSCODER_A), D_ALL);
	MMIO_D(PIPE_DATA_N1(TRANSCODER_A), D_ALL);
	MMIO_D(PIPE_DATA_M2(TRANSCODER_A), D_ALL);
	MMIO_D(PIPE_DATA_N2(TRANSCODER_A), D_ALL);
	MMIO_D(PIPE_LINK_M1(TRANSCODER_A), D_ALL);
	MMIO_D(PIPE_LINK_N1(TRANSCODER_A), D_ALL);
	MMIO_D(PIPE_LINK_M2(TRANSCODER_A), D_ALL);
	MMIO_D(PIPE_LINK_N2(TRANSCODER_A), D_ALL);

	MMIO_D(PIPE_DATA_M1(TRANSCODER_B), D_ALL);
	MMIO_D(PIPE_DATA_N1(TRANSCODER_B), D_ALL);
	MMIO_D(PIPE_DATA_M2(TRANSCODER_B), D_ALL);
	MMIO_D(PIPE_DATA_N2(TRANSCODER_B), D_ALL);
	MMIO_D(PIPE_LINK_M1(TRANSCODER_B), D_ALL);
	MMIO_D(PIPE_LINK_N1(TRANSCODER_B), D_ALL);
	MMIO_D(PIPE_LINK_M2(TRANSCODER_B), D_ALL);
	MMIO_D(PIPE_LINK_N2(TRANSCODER_B), D_ALL);

	MMIO_D(PIPE_DATA_M1(TRANSCODER_C), D_ALL);
	MMIO_D(PIPE_DATA_N1(TRANSCODER_C), D_ALL);
	MMIO_D(PIPE_DATA_M2(TRANSCODER_C), D_ALL);
	MMIO_D(PIPE_DATA_N2(TRANSCODER_C), D_ALL);
	MMIO_D(PIPE_LINK_M1(TRANSCODER_C), D_ALL);
	MMIO_D(PIPE_LINK_N1(TRANSCODER_C), D_ALL);
	MMIO_D(PIPE_LINK_M2(TRANSCODER_C), D_ALL);
	MMIO_D(PIPE_LINK_N2(TRANSCODER_C), D_ALL);

	MMIO_D(PIPE_DATA_M1(TRANSCODER_EDP), D_ALL);
	MMIO_D(PIPE_DATA_N1(TRANSCODER_EDP), D_ALL);
	MMIO_D(PIPE_DATA_M2(TRANSCODER_EDP), D_ALL);
	MMIO_D(PIPE_DATA_N2(TRANSCODER_EDP), D_ALL);
	MMIO_D(PIPE_LINK_M1(TRANSCODER_EDP), D_ALL);
	MMIO_D(PIPE_LINK_N1(TRANSCODER_EDP), D_ALL);
	MMIO_D(PIPE_LINK_M2(TRANSCODER_EDP), D_ALL);
	MMIO_D(PIPE_LINK_N2(TRANSCODER_EDP), D_ALL);

	MMIO_D(PF_CTL(PIPE_A), D_ALL);
	MMIO_D(PF_WIN_SZ(PIPE_A), D_ALL);
	MMIO_D(PF_WIN_POS(PIPE_A), D_ALL);
	MMIO_D(PF_VSCALE(PIPE_A), D_ALL);
	MMIO_D(PF_HSCALE(PIPE_A), D_ALL);

	MMIO_D(PF_CTL(PIPE_B), D_ALL);
	MMIO_D(PF_WIN_SZ(PIPE_B), D_ALL);
	MMIO_D(PF_WIN_POS(PIPE_B), D_ALL);
	MMIO_D(PF_VSCALE(PIPE_B), D_ALL);
	MMIO_D(PF_HSCALE(PIPE_B), D_ALL);

	MMIO_D(PF_CTL(PIPE_C), D_ALL);
	MMIO_D(PF_WIN_SZ(PIPE_C), D_ALL);
	MMIO_D(PF_WIN_POS(PIPE_C), D_ALL);
	MMIO_D(PF_VSCALE(PIPE_C), D_ALL);
	MMIO_D(PF_HSCALE(PIPE_C), D_ALL);

	MMIO_D(WM0_PIPEA_ILK, D_ALL);
	MMIO_D(WM0_PIPEB_ILK, D_ALL);
	MMIO_D(WM0_PIPEC_IVB, D_ALL);
	MMIO_D(WM1_LP_ILK, D_ALL);
	MMIO_D(WM2_LP_ILK, D_ALL);
	MMIO_D(WM3_LP_ILK, D_ALL);
	MMIO_D(WM1S_LP_ILK, D_ALL);
	MMIO_D(WM2S_LP_IVB, D_ALL);
	MMIO_D(WM3S_LP_IVB, D_ALL);

	MMIO_D(BLC_PWM_CPU_CTL2, D_ALL);
	MMIO_D(BLC_PWM_CPU_CTL, D_ALL);
	MMIO_D(BLC_PWM_PCH_CTL1, D_ALL);
	MMIO_D(BLC_PWM_PCH_CTL2, D_ALL);

	MMIO_D(_MMIO(0x48268), D_ALL);

	MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read,
		gmbus_mmio_write);
	MMIO_F(PCH_GPIO_BASE, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(_MMIO(0xe4f00), 0x28, 0, 0, 0, D_ALL, NULL, NULL);

	MMIO_F(_MMIO(_PCH_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
		dp_aux_ch_ctl_mmio_write);
	MMIO_F(_MMIO(_PCH_DPC_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
		dp_aux_ch_ctl_mmio_write);
	MMIO_F(_MMIO(_PCH_DPD_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
		dp_aux_ch_ctl_mmio_write);

	MMIO_DH(PCH_ADPA, D_PRE_SKL, NULL, pch_adpa_mmio_write);

	MMIO_DH(_MMIO(_PCH_TRANSACONF), D_ALL, NULL, transconf_mmio_write);
	MMIO_DH(_MMIO(_PCH_TRANSBCONF), D_ALL, NULL, transconf_mmio_write);

	MMIO_DH(FDI_RX_IIR(PIPE_A), D_ALL, NULL, fdi_rx_iir_mmio_write);
	MMIO_DH(FDI_RX_IIR(PIPE_B), D_ALL, NULL, fdi_rx_iir_mmio_write);
	MMIO_DH(FDI_RX_IIR(PIPE_C), D_ALL, NULL, fdi_rx_iir_mmio_write);
	MMIO_DH(FDI_RX_IMR(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
	MMIO_DH(FDI_RX_IMR(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
	MMIO_DH(FDI_RX_IMR(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
	MMIO_DH(FDI_RX_CTL(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
	MMIO_DH(FDI_RX_CTL(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
	MMIO_DH(FDI_RX_CTL(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);

	MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_A), D_ALL);
	MMIO_D(_MMIO(_PCH_TRANS_HBLANK_A), D_ALL);
	MMIO_D(_MMIO(_PCH_TRANS_HSYNC_A), D_ALL);
	MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_A), D_ALL);
	MMIO_D(_MMIO(_PCH_TRANS_VBLANK_A), D_ALL);
	MMIO_D(_MMIO(_PCH_TRANS_VSYNC_A), D_ALL);
	MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_A), D_ALL);

	MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_B), D_ALL);
	MMIO_D(_MMIO(_PCH_TRANS_HBLANK_B), D_ALL);
	MMIO_D(_MMIO(_PCH_TRANS_HSYNC_B), D_ALL);
	MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_B), D_ALL);
	MMIO_D(_MMIO(_PCH_TRANS_VBLANK_B), D_ALL);
	MMIO_D(_MMIO(_PCH_TRANS_VSYNC_B), D_ALL);
	MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_B), D_ALL);

	MMIO_D(_MMIO(_PCH_TRANSA_DATA_M1), D_ALL);
	MMIO_D(_MMIO(_PCH_TRANSA_DATA_N1), D_ALL);
	MMIO_D(_MMIO(_PCH_TRANSA_DATA_M2), D_ALL);
	MMIO_D(_MMIO(_PCH_TRANSA_DATA_N2), D_ALL);
	MMIO_D(_MMIO(_PCH_TRANSA_LINK_M1), D_ALL);
	MMIO_D(_MMIO(_PCH_TRANSA_LINK_N1), D_ALL);
	MMIO_D(_MMIO(_PCH_TRANSA_LINK_M2), D_ALL);
	MMIO_D(_MMIO(_PCH_TRANSA_LINK_N2), D_ALL);

	MMIO_D(TRANS_DP_CTL(PIPE_A), D_ALL);
	MMIO_D(TRANS_DP_CTL(PIPE_B), D_ALL);
	MMIO_D(TRANS_DP_CTL(PIPE_C), D_ALL);

	MMIO_D(TVIDEO_DIP_CTL(PIPE_A), D_ALL);
	MMIO_D(TVIDEO_DIP_DATA(PIPE_A), D_ALL);
	MMIO_D(TVIDEO_DIP_GCP(PIPE_A), D_ALL);

	MMIO_D(TVIDEO_DIP_CTL(PIPE_B), D_ALL);
	MMIO_D(TVIDEO_DIP_DATA(PIPE_B), D_ALL);
	MMIO_D(TVIDEO_DIP_GCP(PIPE_B), D_ALL);

	MMIO_D(TVIDEO_DIP_CTL(PIPE_C), D_ALL);
	MMIO_D(TVIDEO_DIP_DATA(PIPE_C), D_ALL);
	MMIO_D(TVIDEO_DIP_GCP(PIPE_C), D_ALL);

	MMIO_D(_MMIO(_FDI_RXA_MISC), D_ALL);
	MMIO_D(_MMIO(_FDI_RXB_MISC), D_ALL);
	MMIO_D(_MMIO(_FDI_RXA_TUSIZE1), D_ALL);
	MMIO_D(_MMIO(_FDI_RXA_TUSIZE2), D_ALL);
	MMIO_D(_MMIO(_FDI_RXB_TUSIZE1), D_ALL);
	MMIO_D(_MMIO(_FDI_RXB_TUSIZE2), D_ALL);

	MMIO_DH(PCH_PP_CONTROL, D_ALL, NULL, pch_pp_control_mmio_write);
	MMIO_D(PCH_PP_DIVISOR, D_ALL);
	MMIO_D(PCH_PP_STATUS, D_ALL);
	MMIO_D(PCH_LVDS, D_ALL);
	MMIO_D(_MMIO(_PCH_DPLL_A), D_ALL);
	MMIO_D(_MMIO(_PCH_DPLL_B), D_ALL);
	MMIO_D(_MMIO(_PCH_FPA0), D_ALL);
	MMIO_D(_MMIO(_PCH_FPA1), D_ALL);
	MMIO_D(_MMIO(_PCH_FPB0), D_ALL);
	MMIO_D(_MMIO(_PCH_FPB1), D_ALL);
	MMIO_D(PCH_DREF_CONTROL, D_ALL);
	MMIO_D(PCH_RAWCLK_FREQ, D_ALL);
	MMIO_D(PCH_DPLL_SEL, D_ALL);

	MMIO_D(_MMIO(0x61208), D_ALL);
	MMIO_D(_MMIO(0x6120c), D_ALL);
	MMIO_D(PCH_PP_ON_DELAYS, D_ALL);
	MMIO_D(PCH_PP_OFF_DELAYS, D_ALL);

	MMIO_DH(_MMIO(0xe651c), D_ALL, dpy_reg_mmio_read, NULL);
	MMIO_DH(_MMIO(0xe661c), D_ALL, dpy_reg_mmio_read, NULL);
	MMIO_DH(_MMIO(0xe671c), D_ALL, dpy_reg_mmio_read, NULL);
	MMIO_DH(_MMIO(0xe681c), D_ALL, dpy_reg_mmio_read, NULL);
	MMIO_DH(_MMIO(0xe6c04), D_ALL, dpy_reg_mmio_read, NULL);
	MMIO_DH(_MMIO(0xe6e1c), D_ALL, dpy_reg_mmio_read, NULL);

	MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0,
		PORTA_HOTPLUG_STATUS_MASK
		| PORTB_HOTPLUG_STATUS_MASK
		| PORTC_HOTPLUG_STATUS_MASK
		| PORTD_HOTPLUG_STATUS_MASK,
		NULL, NULL);
	MMIO_DH(LCPLL_CTL, D_ALL, NULL, lcpll_ctl_mmio_write);
	MMIO_D(FUSE_STRAP, D_ALL);
	MMIO_D(DIGITAL_PORT_HOTPLUG_CNTRL, D_ALL);

	MMIO_D(DISP_ARB_CTL, D_ALL);
	MMIO_D(DISP_ARB_CTL2, D_ALL);

	MMIO_D(ILK_DISPLAY_CHICKEN1, D_ALL);
	MMIO_D(ILK_DISPLAY_CHICKEN2, D_ALL);
	MMIO_D(ILK_DSPCLK_GATE_D, D_ALL);

	MMIO_D(SOUTH_CHICKEN1, D_ALL);
	MMIO_DH(SOUTH_CHICKEN2, D_ALL, NULL, south_chicken2_mmio_write);
	MMIO_D(_MMIO(_TRANSA_CHICKEN1), D_ALL);
	MMIO_D(_MMIO(_TRANSB_CHICKEN1), D_ALL);
	MMIO_D(SOUTH_DSPCLK_GATE_D, D_ALL);
	MMIO_D(_MMIO(_TRANSA_CHICKEN2), D_ALL);
	MMIO_D(_MMIO(_TRANSB_CHICKEN2), D_ALL);

	MMIO_D(ILK_DPFC_CB_BASE, D_ALL);
	MMIO_D(ILK_DPFC_CONTROL, D_ALL);
	MMIO_D(ILK_DPFC_RECOMP_CTL, D_ALL);
	MMIO_D(ILK_DPFC_STATUS, D_ALL);
	MMIO_D(ILK_DPFC_FENCE_YOFF, D_ALL);
	MMIO_D(ILK_DPFC_CHICKEN, D_ALL);
	MMIO_D(ILK_FBC_RT_BASE, D_ALL);

	MMIO_D(IPS_CTL, D_ALL);

	MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_A), D_ALL);
	MMIO_D(PIPE_CSC_COEFF_BY(PIPE_A), D_ALL);
	MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_A), D_ALL);
	MMIO_D(PIPE_CSC_COEFF_BU(PIPE_A), D_ALL);
	MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_A), D_ALL);
	MMIO_D(PIPE_CSC_COEFF_BV(PIPE_A), D_ALL);
	MMIO_D(PIPE_CSC_MODE(PIPE_A), D_ALL);
	MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_A), D_ALL);
	MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_A), D_ALL);
	MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_A), D_ALL);
	MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_A), D_ALL);
	MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_A), D_ALL);
	MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_A), D_ALL);

	MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_B), D_ALL);
	MMIO_D(PIPE_CSC_COEFF_BY(PIPE_B), D_ALL);
	MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_B), D_ALL);
	MMIO_D(PIPE_CSC_COEFF_BU(PIPE_B), D_ALL);
	MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_B), D_ALL);
	MMIO_D(PIPE_CSC_COEFF_BV(PIPE_B), D_ALL);
	MMIO_D(PIPE_CSC_MODE(PIPE_B), D_ALL);
	MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_B), D_ALL);
	MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_B), D_ALL);
	MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_B), D_ALL);
	MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_B), D_ALL);
	MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_B), D_ALL);
	MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_B), D_ALL);

	MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_C), D_ALL);
	MMIO_D(PIPE_CSC_COEFF_BY(PIPE_C), D_ALL);
	MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_C), D_ALL);
	MMIO_D(PIPE_CSC_COEFF_BU(PIPE_C), D_ALL);
	MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_C), D_ALL);
	MMIO_D(PIPE_CSC_COEFF_BV(PIPE_C), D_ALL);
	MMIO_D(PIPE_CSC_MODE(PIPE_C), D_ALL);
	MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_C), D_ALL);
	MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_C), D_ALL);
	MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_C), D_ALL);
	MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_C), D_ALL);
	MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_C), D_ALL);
	MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_C), D_ALL);

	MMIO_D(PREC_PAL_INDEX(PIPE_A), D_ALL);
	MMIO_D(PREC_PAL_DATA(PIPE_A), D_ALL);
	MMIO_F(PREC_PAL_GC_MAX(PIPE_A, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
	MMIO_D(PREC_PAL_INDEX(PIPE_B), D_ALL);
	MMIO_D(PREC_PAL_DATA(PIPE_B), D_ALL);
	MMIO_F(PREC_PAL_GC_MAX(PIPE_B, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
	MMIO_D(PREC_PAL_INDEX(PIPE_C), D_ALL);
	MMIO_D(PREC_PAL_DATA(PIPE_C), D_ALL);
	MMIO_F(PREC_PAL_GC_MAX(PIPE_C, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);

	MMIO_D(_MMIO(0x60110), D_ALL);
	MMIO_D(_MMIO(0x61110), D_ALL);
	MMIO_F(_MMIO(0x70400), 0x40, 0, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(_MMIO(0x71400), 0x40, 0, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(_MMIO(0x72400), 0x40, 0, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(_MMIO(0x70440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
	MMIO_F(_MMIO(0x71440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
	MMIO_F(_MMIO(0x72440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
	MMIO_F(_MMIO(0x7044c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
	MMIO_F(_MMIO(0x7144c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
	MMIO_F(_MMIO(0x7244c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);

	MMIO_D(PIPE_WM_LINETIME(PIPE_A), D_ALL);
	MMIO_D(PIPE_WM_LINETIME(PIPE_B), D_ALL);
	MMIO_D(PIPE_WM_LINETIME(PIPE_C), D_ALL);
	MMIO_D(SPLL_CTL, D_ALL);
	MMIO_D(_MMIO(_WRPLL_CTL1), D_ALL);
	MMIO_D(_MMIO(_WRPLL_CTL2), D_ALL);
	MMIO_D(PORT_CLK_SEL(PORT_A), D_ALL);
	MMIO_D(PORT_CLK_SEL(PORT_B), D_ALL);
	MMIO_D(PORT_CLK_SEL(PORT_C), D_ALL);
	MMIO_D(PORT_CLK_SEL(PORT_D), D_ALL);
	MMIO_D(PORT_CLK_SEL(PORT_E), D_ALL);
	MMIO_D(TRANS_CLK_SEL(TRANSCODER_A), D_ALL);
	MMIO_D(TRANS_CLK_SEL(TRANSCODER_B), D_ALL);
	MMIO_D(TRANS_CLK_SEL(TRANSCODER_C), D_ALL);

	MMIO_D(HSW_NDE_RSTWRN_OPT, D_ALL);
	MMIO_D(_MMIO(0x46508), D_ALL);

	MMIO_D(_MMIO(0x49080), D_ALL);
	MMIO_D(_MMIO(0x49180), D_ALL);
	MMIO_D(_MMIO(0x49280), D_ALL);

	MMIO_F(_MMIO(0x49090), 0x14, 0, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(_MMIO(0x49190), 0x14, 0, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(_MMIO(0x49290), 0x14, 0, 0, 0, D_ALL, NULL, NULL);

	MMIO_D(GAMMA_MODE(PIPE_A), D_ALL);
	MMIO_D(GAMMA_MODE(PIPE_B), D_ALL);
	MMIO_D(GAMMA_MODE(PIPE_C), D_ALL);

	MMIO_D(PIPE_MULT(PIPE_A), D_ALL);
	MMIO_D(PIPE_MULT(PIPE_B), D_ALL);
	MMIO_D(PIPE_MULT(PIPE_C), D_ALL);

	MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_A), D_ALL);
	MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_B), D_ALL);
	MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_C), D_ALL);

	MMIO_DH(SFUSE_STRAP, D_ALL, NULL, NULL);
	MMIO_D(SBI_ADDR, D_ALL);
	MMIO_DH(SBI_DATA, D_ALL, sbi_data_mmio_read, NULL);
	MMIO_DH(SBI_CTL_STAT, D_ALL, NULL, sbi_ctl_mmio_write);
	MMIO_D(PIXCLK_GATE, D_ALL);

	MMIO_F(_MMIO(_DPA_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_ALL, NULL,
		dp_aux_ch_ctl_mmio_write);

	MMIO_DH(DDI_BUF_CTL(PORT_A), D_ALL, NULL, ddi_buf_ctl_mmio_write);
	MMIO_DH(DDI_BUF_CTL(PORT_B), D_ALL, NULL, ddi_buf_ctl_mmio_write);
	MMIO_DH(DDI_BUF_CTL(PORT_C), D_ALL, NULL, ddi_buf_ctl_mmio_write);
	MMIO_DH(DDI_BUF_CTL(PORT_D), D_ALL, NULL, ddi_buf_ctl_mmio_write);
	MMIO_DH(DDI_BUF_CTL(PORT_E), D_ALL, NULL, ddi_buf_ctl_mmio_write);

	MMIO_DH(DP_TP_CTL(PORT_A), D_ALL, NULL, dp_tp_ctl_mmio_write);
	MMIO_DH(DP_TP_CTL(PORT_B), D_ALL, NULL, dp_tp_ctl_mmio_write);
	MMIO_DH(DP_TP_CTL(PORT_C), D_ALL, NULL, dp_tp_ctl_mmio_write);
	MMIO_DH(DP_TP_CTL(PORT_D), D_ALL, NULL, dp_tp_ctl_mmio_write);
	MMIO_DH(DP_TP_CTL(PORT_E), D_ALL, NULL, dp_tp_ctl_mmio_write);

	MMIO_DH(DP_TP_STATUS(PORT_A), D_ALL, NULL, dp_tp_status_mmio_write);
	MMIO_DH(DP_TP_STATUS(PORT_B), D_ALL, NULL, dp_tp_status_mmio_write);
	MMIO_DH(DP_TP_STATUS(PORT_C), D_ALL, NULL, dp_tp_status_mmio_write);
	MMIO_DH(DP_TP_STATUS(PORT_D), D_ALL, NULL, dp_tp_status_mmio_write);
	MMIO_DH(DP_TP_STATUS(PORT_E), D_ALL, NULL, NULL);

	MMIO_F(_MMIO(_DDI_BUF_TRANS_A), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(_MMIO(0x64e60), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(_MMIO(0x64eC0), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(_MMIO(0x64f20), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(_MMIO(0x64f80), 0x50, 0, 0, 0, D_ALL, NULL, NULL);

	MMIO_D(HSW_AUD_CFG(PIPE_A), D_ALL);
	MMIO_D(HSW_AUD_PIN_ELD_CP_VLD, D_ALL);
	MMIO_D(HSW_AUD_MISC_CTRL(PIPE_A), D_ALL);

	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_A), D_ALL, NULL, NULL);
	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_B), D_ALL, NULL, NULL);
	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_C), D_ALL, NULL, NULL);
	MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_EDP), D_ALL, NULL, NULL);

	MMIO_D(_MMIO(_TRANSA_MSA_MISC), D_ALL);
	MMIO_D(_MMIO(_TRANSB_MSA_MISC), D_ALL);
	MMIO_D(_MMIO(_TRANSC_MSA_MISC), D_ALL);
	MMIO_D(_MMIO(_TRANS_EDP_MSA_MISC), D_ALL);

	MMIO_DH(FORCEWAKE, D_ALL, NULL, NULL);
	MMIO_D(FORCEWAKE_ACK, D_ALL);
	MMIO_D(GEN6_GT_CORE_STATUS, D_ALL);
	MMIO_D(GEN6_GT_THREAD_STATUS_REG, D_ALL);
	MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
	MMIO_DH(FORCEWAKE_ACK_HSW, D_BDW, NULL, NULL);
	MMIO_D(ECOBUS, D_ALL);
	MMIO_DH(GEN6_RC_CONTROL, D_ALL, NULL, NULL);
	MMIO_DH(GEN6_RC_STATE, D_ALL, NULL, NULL);
	MMIO_D(GEN6_RPNSWREQ, D_ALL);
	MMIO_D(GEN6_RC_VIDEO_FREQ, D_ALL);
	MMIO_D(GEN6_RP_DOWN_TIMEOUT, D_ALL);
	MMIO_D(GEN6_RP_INTERRUPT_LIMITS, D_ALL);
	MMIO_D(GEN6_RPSTAT1, D_ALL);
	MMIO_D(GEN6_RP_CONTROL, D_ALL);
	MMIO_D(GEN6_RP_UP_THRESHOLD, D_ALL);
	MMIO_D(GEN6_RP_DOWN_THRESHOLD, D_ALL);
	MMIO_D(GEN6_RP_CUR_UP_EI, D_ALL);
	MMIO_D(GEN6_RP_CUR_UP, D_ALL);
	MMIO_D(GEN6_RP_PREV_UP, D_ALL);
	MMIO_D(GEN6_RP_CUR_DOWN_EI, D_ALL);
	MMIO_D(GEN6_RP_CUR_DOWN, D_ALL);
	MMIO_D(GEN6_RP_PREV_DOWN, D_ALL);
	MMIO_D(GEN6_RP_UP_EI, D_ALL);
	MMIO_D(GEN6_RP_DOWN_EI, D_ALL);
	MMIO_D(GEN6_RP_IDLE_HYSTERSIS, D_ALL);
	MMIO_D(GEN6_RC1_WAKE_RATE_LIMIT, D_ALL);
	MMIO_D(GEN6_RC6_WAKE_RATE_LIMIT, D_ALL);
	MMIO_D(GEN6_RC6pp_WAKE_RATE_LIMIT, D_ALL);
	MMIO_D(GEN6_RC_EVALUATION_INTERVAL, D_ALL);
	MMIO_D(GEN6_RC_IDLE_HYSTERSIS, D_ALL);
	MMIO_D(GEN6_RC_SLEEP, D_ALL);
	MMIO_D(GEN6_RC1e_THRESHOLD, D_ALL);
	MMIO_D(GEN6_RC6_THRESHOLD, D_ALL);
	MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL);
	MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL);
	MMIO_D(GEN6_PMINTRMSK, D_ALL);

	MMIO_DH(HSW_PWR_WELL_CTL1, D_BDW, NULL, power_well_ctl_mmio_write);
	MMIO_DH(HSW_PWR_WELL_CTL2, D_BDW, NULL, power_well_ctl_mmio_write);
	MMIO_DH(HSW_PWR_WELL_CTL3, D_BDW, NULL, power_well_ctl_mmio_write);
	MMIO_DH(HSW_PWR_WELL_CTL4, D_BDW, NULL, power_well_ctl_mmio_write);
	MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write);
	MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write);

	MMIO_D(RSTDBYCTL, D_ALL);

	MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write);
	MMIO_F(FENCE_REG_GEN6_LO(0), 0x80, 0, 0, 0, D_ALL, fence_mmio_read, fence_mmio_write);
	MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write);

	MMIO_D(TILECTL, D_ALL);

	MMIO_D(GEN6_UCGCTL1, D_ALL);
	MMIO_D(GEN6_UCGCTL2, D_ALL);

	MMIO_F(_MMIO(0x4f000), 0x90, 0, 0, 0, D_ALL, NULL, NULL);

	MMIO_D(GEN6_PCODE_DATA, D_ALL);
	MMIO_D(_MMIO(0x13812c), D_ALL);
	MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
	MMIO_D(HSW_EDRAM_CAP, D_ALL);
	MMIO_D(HSW_IDICR, D_ALL);
	MMIO_DH(GFX_FLSH_CNTL_GEN6, D_ALL, NULL, NULL);

	MMIO_D(_MMIO(0x3c), D_ALL);
	MMIO_D(_MMIO(0x860), D_ALL);
	MMIO_D(ECOSKPD, D_ALL);
	MMIO_D(_MMIO(0x121d0), D_ALL);
	MMIO_D(GEN6_BLITTER_ECOSKPD, D_ALL);
	MMIO_D(_MMIO(0x41d0), D_ALL);
	MMIO_D(GAC_ECO_BITS, D_ALL);
	MMIO_D(_MMIO(0x6200), D_ALL);
	MMIO_D(_MMIO(0x6204), D_ALL);
	MMIO_D(_MMIO(0x6208), D_ALL);
	MMIO_D(_MMIO(0x7118), D_ALL);
	MMIO_D(_MMIO(0x7180), D_ALL);
	MMIO_D(_MMIO(0x7408), D_ALL);
	MMIO_D(_MMIO(0x7c00), D_ALL);
	MMIO_DH(GEN6_MBCTL, D_ALL, NULL, mbctl_write);
	MMIO_D(_MMIO(0x911c), D_ALL);
	MMIO_D(_MMIO(0x9120), D_ALL);
	MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL);

	MMIO_D(GAB_CTL, D_ALL);
	MMIO_D(_MMIO(0x48800), D_ALL);
	MMIO_D(_MMIO(0xce044), D_ALL);
	MMIO_D(_MMIO(0xe6500), D_ALL);
	MMIO_D(_MMIO(0xe6504), D_ALL);
	MMIO_D(_MMIO(0xe6600), D_ALL);
	MMIO_D(_MMIO(0xe6604), D_ALL);
	MMIO_D(_MMIO(0xe6700), D_ALL);
	MMIO_D(_MMIO(0xe6704), D_ALL);
	MMIO_D(_MMIO(0xe6800), D_ALL);
	MMIO_D(_MMIO(0xe6804), D_ALL);
	MMIO_D(PCH_GMBUS4, D_ALL);
	MMIO_D(PCH_GMBUS5, D_ALL);

	MMIO_D(_MMIO(0x902c), D_ALL);
	MMIO_D(_MMIO(0xec008), D_ALL);
	MMIO_D(_MMIO(0xec00c), D_ALL);
	MMIO_D(_MMIO(0xec008 + 0x18), D_ALL);
	MMIO_D(_MMIO(0xec00c + 0x18), D_ALL);
	MMIO_D(_MMIO(0xec008 + 0x18 * 2), D_ALL);
	MMIO_D(_MMIO(0xec00c + 0x18 * 2), D_ALL);
	MMIO_D(_MMIO(0xec008 + 0x18 * 3), D_ALL);
	MMIO_D(_MMIO(0xec00c + 0x18 * 3), D_ALL);
	MMIO_D(_MMIO(0xec408), D_ALL);
	MMIO_D(_MMIO(0xec40c), D_ALL);
	MMIO_D(_MMIO(0xec408 + 0x18), D_ALL);
	MMIO_D(_MMIO(0xec40c + 0x18), D_ALL);
	MMIO_D(_MMIO(0xec408 + 0x18 * 2), D_ALL);
	MMIO_D(_MMIO(0xec40c + 0x18 * 2), D_ALL);
	MMIO_D(_MMIO(0xec408 + 0x18 * 3), D_ALL);
	MMIO_D(_MMIO(0xec40c + 0x18 * 3), D_ALL);
	MMIO_D(_MMIO(0xfc810), D_ALL);
	MMIO_D(_MMIO(0xfc81c), D_ALL);
	MMIO_D(_MMIO(0xfc828), D_ALL);
	MMIO_D(_MMIO(0xfc834), D_ALL);
	MMIO_D(_MMIO(0xfcc00), D_ALL);
	MMIO_D(_MMIO(0xfcc0c), D_ALL);
	MMIO_D(_MMIO(0xfcc18), D_ALL);
	MMIO_D(_MMIO(0xfcc24), D_ALL);
	MMIO_D(_MMIO(0xfd000), D_ALL);
	MMIO_D(_MMIO(0xfd00c), D_ALL);
	MMIO_D(_MMIO(0xfd018), D_ALL);
	MMIO_D(_MMIO(0xfd024), D_ALL);
	MMIO_D(_MMIO(0xfd034), D_ALL);

	MMIO_DH(FPGA_DBG, D_ALL, NULL, fpga_dbg_mmio_write);
	MMIO_D(_MMIO(0x2054), D_ALL);
	MMIO_D(_MMIO(0x12054), D_ALL);
	MMIO_D(_MMIO(0x22054), D_ALL);
	MMIO_D(_MMIO(0x1a054), D_ALL);

	MMIO_D(_MMIO(0x44070), D_ALL);
	MMIO_DFH(_MMIO(0x215c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x2178), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x217c), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x12178), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x1217c), D_ALL, F_CMD_ACCESS, NULL, NULL);

	MMIO_F(_MMIO(0x2290), 8, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
	MMIO_D(_MMIO(0x2b00), D_BDW_PLUS);
	MMIO_D(_MMIO(0x2360), D_BDW_PLUS);
	MMIO_F(_MMIO(0x5200), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(_MMIO(0x5240), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(_MMIO(0x5280), 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);

	MMIO_DFH(_MMIO(0x1c17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x1c178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(BCS_SWCTRL, D_ALL, F_CMD_ACCESS, NULL, NULL);

	MMIO_F(HS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(DS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(IA_VERTICES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(IA_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(VS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(GS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(GS_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(CL_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(CL_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(PS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
	MMIO_F(PS_DEPTH_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);

	MMIO_DH(_MMIO(0x4260), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
	MMIO_DH(_MMIO(0x4264), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
	MMIO_DH(_MMIO(0x4268), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
	MMIO_DH(_MMIO(0x426c), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
	MMIO_DH(_MMIO(0x4270), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
	MMIO_DFH(_MMIO(0x4094), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);

	MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_RING_GM_RDR(RING_BBADDR, D_ALL, NULL, NULL);
	MMIO_DFH(_MMIO(0x2220), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x12220), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x22220), D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_RING_DFH(RING_SYNC_1, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_RING_DFH(RING_SYNC_0, D_ALL, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x22178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x1a178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x1a17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x2217c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);

	MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
	MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
	return 0;
}

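/* Additional registers for Broadwell and later platforms. */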
static int init_broadwell_mmio_info(struct intel_gvt *gvt)
{
	struct drm_i915_private *dev_priv = gvt->dev_priv;
	int ret;

	MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
	MMIO_D(GEN8_GT_ISR(0), D_BDW_PLUS);

	MMIO_DH(GEN8_GT_IMR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_GT_IER(1), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_GT_IIR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
	MMIO_D(GEN8_GT_ISR(1), D_BDW_PLUS);

	MMIO_DH(GEN8_GT_IMR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_GT_IER(2), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_GT_IIR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
	MMIO_D(GEN8_GT_ISR(2), D_BDW_PLUS);

	MMIO_DH(GEN8_GT_IMR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_GT_IER(3), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_GT_IIR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
	MMIO_D(GEN8_GT_ISR(3), D_BDW_PLUS);

	MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_A), D_BDW_PLUS, NULL,
		intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_DE_PIPE_IER(PIPE_A), D_BDW_PLUS, NULL,
		intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_A), D_BDW_PLUS, NULL,
		intel_vgpu_reg_iir_handler);
	MMIO_D(GEN8_DE_PIPE_ISR(PIPE_A), D_BDW_PLUS);

	MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_B), D_BDW_PLUS, NULL,
		intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_DE_PIPE_IER(PIPE_B), D_BDW_PLUS, NULL,
		intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_B), D_BDW_PLUS, NULL,
		intel_vgpu_reg_iir_handler);
	MMIO_D(GEN8_DE_PIPE_ISR(PIPE_B), D_BDW_PLUS);

	MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_C), D_BDW_PLUS, NULL,
		intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_DE_PIPE_IER(PIPE_C), D_BDW_PLUS, NULL,
		intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_C), D_BDW_PLUS, NULL,
		intel_vgpu_reg_iir_handler);
	MMIO_D(GEN8_DE_PIPE_ISR(PIPE_C), D_BDW_PLUS);

	MMIO_DH(GEN8_DE_PORT_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_DE_PORT_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_DE_PORT_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
	MMIO_D(GEN8_DE_PORT_ISR, D_BDW_PLUS);

	MMIO_DH(GEN8_DE_MISC_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_DE_MISC_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_DE_MISC_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
	MMIO_D(GEN8_DE_MISC_ISR, D_BDW_PLUS);

	MMIO_DH(GEN8_PCU_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
	MMIO_DH(GEN8_PCU_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
	MMIO_DH(GEN8_PCU_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
	MMIO_D(GEN8_PCU_ISR, D_BDW_PLUS);

	MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
		intel_vgpu_reg_master_irq_handler);

	MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS,
		mmio_read_from_hw, NULL);

#define RING_REG(base) _MMIO((base) + 0xd0)
	MMIO_RING_F(RING_REG, 4, F_RO, 0,
		~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
		ring_reset_ctl_write);
#undef RING_REG

#define RING_REG(base) _MMIO((base) + 0x230)
	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write);
#undef RING_REG

#define RING_REG(base) _MMIO((base) + 0x234)
	MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS,
		NULL, NULL);
#undef RING_REG

#define RING_REG(base) _MMIO((base) + 0x244)
	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
#undef RING_REG

#define RING_REG(base) _MMIO((base) + 0x370)
	MMIO_RING_F(RING_REG, 48, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
#undef RING_REG

#define RING_REG(base) _MMIO((base) + 0x3a0)
	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
#undef RING_REG

	MMIO_D(PIPEMISC(PIPE_A), D_BDW_PLUS);
	MMIO_D(PIPEMISC(PIPE_B), D_BDW_PLUS);
	MMIO_D(PIPEMISC(PIPE_C), D_BDW_PLUS);
	MMIO_D(_MMIO(0x1c1d0), D_BDW_PLUS);
	MMIO_D(GEN6_MBCUNIT_SNPCR, D_BDW_PLUS);
	MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS);
	MMIO_D(_MMIO(0x1c054), D_BDW_PLUS);

	MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);

	MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS);
	MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS);

	MMIO_D(GAMTARBMODE, D_BDW_PLUS);

#define RING_REG(base) _MMIO((base) + 0x270)
	MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
#undef RING_REG

	MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write);

	MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);

	MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW_PLUS);
	MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW_PLUS);
	MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS);

	MMIO_D(WM_MISC, D_BDW);
	MMIO_D(_MMIO(BDW_EDP_PSR_BASE), D_BDW);

	MMIO_D(_MMIO(0x6671c), D_BDW_PLUS);
	MMIO_D(_MMIO(0x66c00), D_BDW_PLUS);
	MMIO_D(_MMIO(0x66c04), D_BDW_PLUS);

	MMIO_D(HSW_GTT_CACHE_EN, D_BDW_PLUS);

	MMIO_D(GEN8_EU_DISABLE0, D_BDW_PLUS);
	MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS);
	MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS);

	MMIO_D(_MMIO(0xfdc), D_BDW_PLUS);
	MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
		NULL, NULL);
	MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
		NULL, NULL);
	MMIO_DFH(GEN8_UCGCTL6, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);

	MMIO_DFH(_MMIO(0xb1f0), D_BDW, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xb1c0), D_BDW, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xb100), D_BDW, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL);
	MMIO_D(_MMIO(0xb110), D_BDW);

	MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS,
		NULL, force_nonpriv_write);

	MMIO_D(_MMIO(0x44484), D_BDW_PLUS);
	MMIO_D(_MMIO(0x4448c), D_BDW_PLUS);

	MMIO_DFH(_MMIO(0x83a4), D_BDW, F_CMD_ACCESS, NULL, NULL);
	MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS);

	MMIO_DFH(_MMIO(0x8430), D_BDW, F_CMD_ACCESS, NULL, NULL);

	MMIO_D(_MMIO(0x110000), D_BDW_PLUS);

	MMIO_D(_MMIO(0x48400), D_BDW_PLUS);

	MMIO_D(_MMIO(0x6e570), D_BDW_PLUS);
	MMIO_D(_MMIO(0x65f10), D_BDW_PLUS);

	MMIO_DFH(_MMIO(0xe194), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xe188), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x2580), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);

	MMIO_DFH(_MMIO(0x2248), D_BDW, F_CMD_ACCESS, NULL, NULL);

	MMIO_DFH(_MMIO(0xe220), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xe230), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xe240), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xe260), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xe270), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xe280), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
	return 0;
}

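/* Additional registers for Skylake and later platforms. */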

static int init_skl_mmio_info(struct intel_gvt *gvt)
{
	struct drm_i915_private *dev_priv = gvt->dev_priv;
	int ret;

	MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
	MMIO_DH(FORCEWAKE_ACK_RENDER_GEN9, D_SKL_PLUS, NULL, NULL);
	MMIO_DH(FORCEWAKE_BLITTER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
	MMIO_DH(FORCEWAKE_ACK_BLITTER_GEN9, D_SKL_PLUS, NULL, NULL);
	MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
	MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);
	MMIO_F(_MMIO(_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
		dp_aux_ch_ctl_mmio_write);
	MMIO_F(_MMIO(_DPC_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
		dp_aux_ch_ctl_mmio_write);
	MMIO_F(_MMIO(_DPD_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
		dp_aux_ch_ctl_mmio_write);
	MMIO_D(HSW_PWR_WELL_CTL1, D_SKL_PLUS);
	MMIO_DH(HSW_PWR_WELL_CTL2, D_SKL_PLUS, NULL, skl_power_well_ctl_write);
	MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
	MMIO_D(_MMIO(0xa210), D_SKL_PLUS);
	MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
	MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
	MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DH(_MMIO(0x4ddc), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(0x42080), D_SKL_PLUS, NULL, NULL);
	MMIO_D(_MMIO(0x45504), D_SKL_PLUS);
	MMIO_D(_MMIO(0x45520), D_SKL_PLUS);
	MMIO_D(_MMIO(0x46000), D_SKL_PLUS);
	MMIO_DH(_MMIO(0x46010), D_SKL_PLUS, NULL, skl_lcpll_write);
	MMIO_DH(_MMIO(0x46014), D_SKL_PLUS, NULL, skl_lcpll_write);
	MMIO_D(_MMIO(0x6C040), D_SKL_PLUS);
	MMIO_D(_MMIO(0x6C048), D_SKL_PLUS);
	MMIO_D(_MMIO(0x6C050), D_SKL_PLUS);
	MMIO_D(_MMIO(0x6C044), D_SKL_PLUS);
	MMIO_D(_MMIO(0x6C04C), D_SKL_PLUS);
	MMIO_D(_MMIO(0x6C054), D_SKL_PLUS);
	MMIO_D(_MMIO(0x6c058), D_SKL_PLUS);
	MMIO_D(_MMIO(0x6c05c), D_SKL_PLUS);
	MMIO_DH(_MMIO(0x6c060), D_SKL_PLUS, dpll_status_read, NULL);
	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL_PLUS, NULL, NULL);
	MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
	MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
	MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
	MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
	MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
	MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
	MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
	MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
	MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
	MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
	MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
	MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 1)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 2)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 3)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 4)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 1)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 2)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 3)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 4)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 1)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 2)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 1)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 2)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 3)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 4)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 1)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 2)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 3)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 4)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 1)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 2)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL);
	MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL);
	MMIO_D(_MMIO(0x70380), D_SKL_PLUS);
	MMIO_D(_MMIO(0x71380), D_SKL_PLUS);
	MMIO_D(_MMIO(0x72380), D_SKL_PLUS);
	MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
	MMIO_D(_MMIO(0x7039c), D_SKL_PLUS);
	MMIO_D(_MMIO(0x8f074), D_SKL_PLUS);
	MMIO_D(_MMIO(0x8f004), D_SKL_PLUS);
	MMIO_D(_MMIO(0x8f034), D_SKL_PLUS);
	MMIO_D(_MMIO(0xb11c), D_SKL_PLUS);
	MMIO_D(_MMIO(0x51000), D_SKL_PLUS);
	MMIO_D(_MMIO(0x6c00c), D_SKL_PLUS);
	MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
		NULL, NULL);
	MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
		NULL, NULL);
	MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
	MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
	MMIO_D(RC6_LOCATION, D_SKL_PLUS);
	MMIO_DFH(_MMIO(0x20e0), D_SKL_PLUS, F_MODE_MASK, NULL, NULL);
	MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
		NULL, NULL);

	/* TRTT */
	MMIO_DFH(_MMIO(0x4de0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x4de4), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x4de8), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x4dec), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x4df0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x4df4), D_SKL_PLUS, F_CMD_ACCESS,
		NULL, gen9_trtte_write);
	MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);

	MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
	MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
	MMIO_D(_MMIO(0xc403c), D_SKL_PLUS);
	MMIO_D(_MMIO(0xb004), D_SKL_PLUS);
	MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
	MMIO_D(_MMIO(0x65900), D_SKL_PLUS);
	MMIO_D(_MMIO(0x1082c0), D_SKL_PLUS);
	MMIO_D(_MMIO(0x4068), D_SKL_PLUS);
	MMIO_D(_MMIO(0x67054), D_SKL_PLUS);
	MMIO_D(_MMIO(0x6e560), D_SKL_PLUS);
	MMIO_D(_MMIO(0x6e554), D_SKL_PLUS);
	MMIO_D(_MMIO(0x2b20), D_SKL_PLUS);
	MMIO_D(_MMIO(0x65f00), D_SKL_PLUS);
	MMIO_D(_MMIO(0x65f08), D_SKL_PLUS);
	MMIO_D(_MMIO(0x320f0), D_SKL_PLUS);
	MMIO_D(_MMIO(0x70034), D_SKL_PLUS);
	MMIO_D(_MMIO(0x71034), D_SKL_PLUS);
	MMIO_D(_MMIO(0x72034), D_SKL_PLUS);
	MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_A)), D_SKL_PLUS);
	MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_B)), D_SKL_PLUS);
	MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_C)), D_SKL_PLUS);
	MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_A)), D_SKL_PLUS);
	MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_B)), D_SKL_PLUS);
	MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_C)), D_SKL_PLUS);
	MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_A)), D_SKL_PLUS);
	MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_B)), D_SKL_PLUS);
	MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS);
	MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
	MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
		NULL, NULL);
	MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
		NULL, NULL);
	MMIO_D(_MMIO(0x4ab8), D_KBL);
	MMIO_D(_MMIO(0x2248), D_KBL | D_SKL);

	return 0;
}

static int init_bxt_mmio_info(struct intel_gvt *gvt)
{
	struct drm_i915_private *dev_priv = gvt->dev_priv;
	int ret;

	MMIO_F(_MMIO(0x80000), 0x3000, 0, 0, 0, D_BXT, NULL, NULL);
	MMIO_D(GEN7_SAMPLER_INSTDONE, D_BXT);
	MMIO_D(GEN7_ROW_INSTDONE, D_BXT);
	MMIO_D(GEN8_FAULT_TLB_DATA0, D_BXT);
	MMIO_D(GEN8_FAULT_TLB_DATA1, D_BXT);
	MMIO_D(ERROR_GEN6, D_BXT);
	MMIO_D(DONE_REG, D_BXT);
	MMIO_D(EIR, D_BXT);
	MMIO_D(PGTBL_ER, D_BXT);
	MMIO_D(_MMIO(0x4194), D_BXT);
	MMIO_D(_MMIO(0x4294), D_BXT);
	MMIO_D(_MMIO(0x4494), D_BXT);
	MMIO_RING_D(RING_PSMI_CTL, D_BXT);
	MMIO_RING_D(RING_DMA_FADD, D_BXT);
	MMIO_RING_D(RING_DMA_FADD_UDW, D_BXT);
	MMIO_RING_D(RING_IPEHR, D_BXT);
	MMIO_RING_D(RING_INSTPS, D_BXT);
	MMIO_RING_D(RING_BBADDR_UDW, D_BXT);
	MMIO_RING_D(RING_BBSTATE, D_BXT);
	MMIO_RING_D(RING_IPEIR, D_BXT);
	MMIO_F(SOFT_SCRATCH(0), 16 * 4, 0, 0, 0, D_BXT, NULL, NULL);
	MMIO_DH(BXT_P_CR_GT_DISP_PWRON, D_BXT, NULL, bxt_gt_disp_pwron_write);
	MMIO_D(BXT_RP_STATE_CAP, D_BXT);
	MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY0), D_BXT,
		NULL, bxt_phy_ctl_family_write);
	MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY1), D_BXT,
		NULL, bxt_phy_ctl_family_write);
	MMIO_D(BXT_PHY_CTL(PORT_A), D_BXT);
	MMIO_D(BXT_PHY_CTL(PORT_B), D_BXT);
	MMIO_D(BXT_PHY_CTL(PORT_C), D_BXT);
	MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_A), D_BXT,
		NULL, bxt_port_pll_enable_write);
	MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_B), D_BXT,
		NULL, bxt_port_pll_enable_write);
	MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_C), D_BXT, NULL,
		bxt_port_pll_enable_write);
	MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY0), D_BXT);
	MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY0), D_BXT);
	MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY0), D_BXT);
	MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY0), D_BXT);
	MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY0), D_BXT);
	MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY0), D_BXT);
	MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY0), D_BXT);
	MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY0), D_BXT);
	MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY0), D_BXT);
	MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY1), D_BXT);
	MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY1), D_BXT);
	MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY1), D_BXT);
	MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY1), D_BXT);
	MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY1), D_BXT);
	MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY1), D_BXT);
	MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY1), D_BXT);
	MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY1), D_BXT);
	MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY1), D_BXT);
	MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH0), D_BXT);
	MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH0), D_BXT);
	MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH0), D_BXT);
	MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
	MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH0), D_BXT);
	MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH0), D_BXT);
	MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0), D_BXT,
		NULL, bxt_pcs_dw12_grp_write);
	MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH0), D_BXT);
	MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
	MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0), D_BXT,
		bxt_port_tx_dw3_read, NULL);
	MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
	MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH0), D_BXT);
	MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 0), D_BXT);
	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 1), D_BXT);
	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 2), D_BXT);
	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 3), D_BXT);
	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 0), D_BXT);
	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 1), D_BXT);
	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 2), D_BXT);
	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 3), D_BXT);
	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 6), D_BXT);
	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 8), D_BXT);
	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 9), D_BXT);
	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 10), D_BXT);
	MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH1), D_BXT);
	MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH1), D_BXT);
	MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH1), D_BXT);
	MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
	MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH1), D_BXT);
	MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH1), D_BXT);
	MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1), D_BXT,
		NULL, bxt_pcs_dw12_grp_write);
	MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH1), D_BXT);
	MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
	MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1), D_BXT,
		bxt_port_tx_dw3_read, NULL);
	MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
	MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH1), D_BXT);
	MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 0), D_BXT);
	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 1), D_BXT);
	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 2), D_BXT);
	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 3), D_BXT);
	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 0), D_BXT);
	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 1), D_BXT);
	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 2), D_BXT);
	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 3), D_BXT);
	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 6), D_BXT);
	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 8), D_BXT);
	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 9), D_BXT);
	MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 10), D_BXT);
	MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY1, DPIO_CH0), D_BXT);
	MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY1, DPIO_CH0), D_BXT);
	MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY1, DPIO_CH0), D_BXT);
	MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
	MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY1, DPIO_CH0), D_BXT);
	MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY1, DPIO_CH0), D_BXT);
	MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0), D_BXT,
		NULL, bxt_pcs_dw12_grp_write);
	MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY1, DPIO_CH0), D_BXT);
	MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
	MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0), D_BXT,
		bxt_port_tx_dw3_read, NULL);
	MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
	MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY1, DPIO_CH0), D_BXT);
	MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 0), D_BXT);
	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 1), D_BXT);
	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 2), D_BXT);
	MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 3), D_BXT);
	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 0), D_BXT);
	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 1), D_BXT);
	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 2), D_BXT);
	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 3), D_BXT);
	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 6), D_BXT);
	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 8), D_BXT);
	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 9), D_BXT);
	MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 10), D_BXT);
	MMIO_D(BXT_DE_PLL_CTL, D_BXT);
	MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write);
	MMIO_D(BXT_DSI_PLL_CTL, D_BXT);
	MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT);
	MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT);
	MMIO_D(GEN9_CLKGATE_DIS_4, D_BXT);
	MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT);
	MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
	MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT);
	MMIO_D(RC6_CTX_BASE, D_BXT);
	MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT);
	MMIO_D(GEN8_PUSHBUS_ENABLE, D_BXT);
	MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT);
	MMIO_D(GEN6_GFXPAUSE, D_BXT);
	MMIO_D(GEN8_L3SQCREG1, D_BXT);
	MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL);

	return 0;
}

static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
					      unsigned int offset)
{
	unsigned long device = intel_gvt_get_device_type(gvt);
	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
	int num = gvt->mmio.num_mmio_block;
	int i;

	for (i = 0; i < num; i++, block++) {
		if (!(device & block->device))
			continue;
		if (offset >= i915_mmio_reg_offset(block->offset) &&
		    offset < i915_mmio_reg_offset(block->offset) + block->size)
			return block;
	}
	return NULL;
}
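
/*
 * Worked example (illustrative, not normative): with the mmio_blocks table
 * below, an access at VGT_PVINFO_PAGE + 8 falls inside the PVINFO block for
 * every device type, so find_mmio_block() returns that entry and
 * intel_vgpu_mmio_reg_rw() dispatches the access to pvinfo_mmio_read() or
 * pvinfo_mmio_write() without ever consulting the per-register hash table.
 */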

/**
 * intel_gvt_clean_mmio_info - clean up MMIO information table for GVT device
 * @gvt: GVT device
 *
 * This function is called at the driver unloading stage, to clean up the
 * MMIO information table of GVT device
 *
 */
void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
{
	struct hlist_node *tmp;
	struct intel_gvt_mmio_info *e;
	int i;

	hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node)
		kfree(e);

	vfree(gvt->mmio.mmio_attribute);
	gvt->mmio.mmio_attribute = NULL;
}

/* Special MMIO blocks. */
static struct gvt_mmio_block mmio_blocks[] = {
	{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
	{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
	{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
		pvinfo_mmio_read, pvinfo_mmio_write},
	{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
	{D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
	{D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
};
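
/*
 * A hypothetical sketch of what an additional entry would look like; the
 * offset and size below are made up for illustration and do not describe a
 * real register range:
 *
 *	{D_BDW_PLUS, _MMIO(0x180000), 0x1000, NULL, NULL},
 *
 * A NULL read/write pair means accesses to the block fall through to the
 * default vreg-backed handlers in intel_vgpu_mmio_reg_rw().
 */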

/**
 * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
 * @gvt: GVT device
 *
 * This function is called at the initialization stage, to setup the MMIO
 * information table for GVT device
 *
 * Returns:
 * zero on success, negative if failed.
 */
int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
{
	struct intel_gvt_device_info *info = &gvt->device_info;
	struct drm_i915_private *dev_priv = gvt->dev_priv;
	int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute);
	int ret;

	gvt->mmio.mmio_attribute = vzalloc(size);
	if (!gvt->mmio.mmio_attribute)
		return -ENOMEM;

	ret = init_generic_mmio_info(gvt);
	if (ret)
		goto err;

	if (IS_BROADWELL(dev_priv)) {
		ret = init_broadwell_mmio_info(gvt);
		if (ret)
			goto err;
	} else if (IS_SKYLAKE(dev_priv)
		|| IS_KABYLAKE(dev_priv)) {
		ret = init_broadwell_mmio_info(gvt);
		if (ret)
			goto err;
		ret = init_skl_mmio_info(gvt);
		if (ret)
			goto err;
	} else if (IS_BROXTON(dev_priv)) {
		ret = init_broadwell_mmio_info(gvt);
		if (ret)
			goto err;
		ret = init_skl_mmio_info(gvt);
		if (ret)
			goto err;
		ret = init_bxt_mmio_info(gvt);
		if (ret)
			goto err;
	}

	gvt->mmio.mmio_block = mmio_blocks;
	gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);

	return 0;
err:
	intel_gvt_clean_mmio_info(gvt);
	return ret;
}
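
/*
 * Note the layering above: Skylake and Kabylake register the Broadwell table
 * first and the Skylake additions on top of it, while Broxton stacks its own
 * table on top of both, so each platform table only needs to describe what
 * that platform adds or changes.
 */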

/**
 * intel_gvt_for_each_tracked_mmio - iterate each tracked mmio
 * @gvt: a GVT device
 * @handler: the handler
 * @data: private data given to handler
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
	int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
	void *data)
{
	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
	struct intel_gvt_mmio_info *e;
	int i, j, ret;

	hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
		ret = handler(gvt, e->offset, data);
		if (ret)
			return ret;
	}

	for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) {
		for (j = 0; j < block->size; j += 4) {
			ret = handler(gvt,
				i915_mmio_reg_offset(block->offset) + j,
				data);
			if (ret)
				return ret;
		}
	}
	return 0;
}
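
/*
 * Minimal usage sketch (illustrative only; count_tracked_mmio and the
 * calling code are hypothetical and not part of this file):
 *
 *	static int count_tracked_mmio(struct intel_gvt *gvt, u32 offset,
 *				      void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *		return 0;
 *	}
 *
 *	unsigned int num = 0;
 *
 *	intel_gvt_for_each_tracked_mmio(gvt, count_tracked_mmio, &num);
 *
 * The walk visits each hashed register once and every 4-byte slot of each
 * special block, and stops at the first handler that returns non-zero.
 */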

/**
 * intel_vgpu_default_mmio_read - default MMIO read handler
 * @vgpu: a vGPU
 * @offset: access offset
 * @p_data: data return buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	read_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

/**
 * intel_vgpu_default_mmio_write - default MMIO write handler
 * @vgpu: a vGPU
 * @offset: access offset
 * @p_data: write data buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	write_vreg(vgpu, offset, p_data, bytes);
	return 0;
}

/**
 * intel_vgpu_mask_mmio_write - write mask register
 * @vgpu: a vGPU
 * @offset: access offset
 * @p_data: write data buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 mask, old_vreg;

	old_vreg = vgpu_vreg(vgpu, offset);
	write_vreg(vgpu, offset, p_data, bytes);
	mask = vgpu_vreg(vgpu, offset) >> 16;
	vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) |
		(vgpu_vreg(vgpu, offset) & mask);
	return 0;
}
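
/*
 * Worked example of the masked-write semantics above (the values are
 * illustrative): the upper 16 bits of the written value select which of the
 * lower 16 bits get updated. With old_vreg = 0x0000000c, a 4-byte write of
 * 0x00010001 yields mask = 0x0001 and a final vreg of
 * (0x0000000c & ~0x0001) | (0x00010001 & 0x0001) = 0x0000000d: bit 0 is set
 * while bits 1..15 keep their old contents.
 */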

/**
 * intel_gvt_in_force_nonpriv_whitelist - check whether an MMIO register is in
 * the force-nonpriv whitelist
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if the register is in the force-nonpriv whitelist;
 * False otherwise.
 */
bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
		unsigned int offset)
{
	return in_whitelist(offset);
}

/**
 * intel_vgpu_mmio_reg_rw - emulate tracked mmio registers
 * @vgpu: a vGPU
 * @offset: register offset
 * @pdata: data buffer
 * @bytes: data length
 * @is_read: read or write
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
		void *pdata, unsigned int bytes, bool is_read)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_mmio_info *mmio_info;
	struct gvt_mmio_block *mmio_block;
	gvt_mmio_func func;
	int ret;

	if (WARN_ON(bytes > 8))
		return -EINVAL;

	/*
	 * Handle special MMIO blocks.
	 */
	mmio_block = find_mmio_block(gvt, offset);
	if (mmio_block) {
		func = is_read ? mmio_block->read : mmio_block->write;
		if (func)
			return func(vgpu, offset, pdata, bytes);
		goto default_rw;
	}

	/*
	 * Normal tracked MMIOs.
	 */
	mmio_info = find_mmio_info(gvt, offset);
	if (!mmio_info) {
		gvt_dbg_mmio("untracked MMIO %08x len %d\n", offset, bytes);
		goto default_rw;
	}

	if (is_read)
		return mmio_info->read(vgpu, offset, pdata, bytes);
	else {
		u64 ro_mask = mmio_info->ro_mask;
		u32 old_vreg = 0, old_sreg = 0;
		u64 data = 0;

		if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
			old_vreg = vgpu_vreg(vgpu, offset);
			old_sreg = vgpu_sreg(vgpu, offset);
		}

		if (likely(!ro_mask))
			ret = mmio_info->write(vgpu, offset, pdata, bytes);
		else if (!~ro_mask) {
			gvt_vgpu_err("try to write RO reg %x\n", offset);
			return 0;
		} else {
			/* keep the RO bits in the virtual register */
			memcpy(&data, pdata, bytes);
			data &= ~ro_mask;
			data |= vgpu_vreg(vgpu, offset) & ro_mask;
			ret = mmio_info->write(vgpu, offset, &data, bytes);
		}

		/* higher 16bits of mode ctl regs are mask bits for change */
		if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
			u32 mask = vgpu_vreg(vgpu, offset) >> 16;

			vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
				| (vgpu_vreg(vgpu, offset) & mask);
			vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
				| (vgpu_sreg(vgpu, offset) & mask);
		}
	}

	return ret;

default_rw:
	return is_read ?
		intel_vgpu_default_mmio_read(vgpu, offset, pdata, bytes) :
		intel_vgpu_default_mmio_write(vgpu, offset, pdata, bytes);
}
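
/*
 * Worked example of the read-only-mask merge above (the values are
 * illustrative): with ro_mask = 0x0000ffff, a current vreg of 0x1234abcd and
 * a guest write of 0xffffffff, the value handed to the write handler is
 * (0xffffffff & ~0x0000ffff) | (0x1234abcd & 0x0000ffff) = 0xffffabcd, so
 * the read-only low 16 bits keep their virtual-register contents.
 */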