handlers.c 84 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
727782779278027812782278327842785278627872788278927902791279227932794279527962797279827992800280128022803280428052806280728082809281028112812281328142815281628172818281928202821282228232824282528262827282828292830283128322833283428352836283728382839284028412842284328442845284628472848
  1. /*
  2. * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice (including the next
  12. * paragraph) shall be included in all copies or substantial portions of the
  13. * Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  18. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  20. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  21. * SOFTWARE.
  22. *
  23. * Authors:
  24. * Kevin Tian <kevin.tian@intel.com>
  25. * Eddie Dong <eddie.dong@intel.com>
  26. * Zhiyuan Lv <zhiyuan.lv@intel.com>
  27. *
  28. * Contributors:
  29. * Min He <min.he@intel.com>
  30. * Tina Zhang <tina.zhang@intel.com>
  31. * Pei Zhang <pei.zhang@intel.com>
  32. * Niu Bing <bing.niu@intel.com>
  33. * Ping Gao <ping.a.gao@intel.com>
  34. * Zhi Wang <zhi.a.wang@intel.com>
  35. *
  36. */
  37. #include "i915_drv.h"
  38. #include "gvt.h"
  39. #include "i915_pvinfo.h"
  40. /* XXX FIXME i915 has changed PP_XXX definition */
  41. #define PCH_PP_STATUS _MMIO(0xc7200)
  42. #define PCH_PP_CONTROL _MMIO(0xc7204)
  43. #define PCH_PP_ON_DELAYS _MMIO(0xc7208)
  44. #define PCH_PP_OFF_DELAYS _MMIO(0xc720c)
  45. #define PCH_PP_DIVISOR _MMIO(0xc7210)
  46. /* Register contains RO bits */
  47. #define F_RO (1 << 0)
  48. /* Register contains graphics address */
  49. #define F_GMADR (1 << 1)
  50. /* Mode mask registers with high 16 bits as the mask bits */
  51. #define F_MODE_MASK (1 << 2)
  52. /* This reg can be accessed by GPU commands */
  53. #define F_CMD_ACCESS (1 << 3)
  54. /* This reg has been accessed by a VM */
  55. #define F_ACCESSED (1 << 4)
  56. /* This reg has been accessed through GPU commands */
  57. #define F_CMD_ACCESSED (1 << 5)
  58. /* This reg could be accessed by unaligned address */
  59. #define F_UNALIGN (1 << 6)
  60. unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
  61. {
  62. if (IS_BROADWELL(gvt->dev_priv))
  63. return D_BDW;
  64. else if (IS_SKYLAKE(gvt->dev_priv))
  65. return D_SKL;
  66. return 0;
  67. }
  68. bool intel_gvt_match_device(struct intel_gvt *gvt,
  69. unsigned long device)
  70. {
  71. return intel_gvt_get_device_type(gvt) & device;
  72. }
  73. static void read_vreg(struct intel_vgpu *vgpu, unsigned int offset,
  74. void *p_data, unsigned int bytes)
  75. {
  76. memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
  77. }
  78. static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
  79. void *p_data, unsigned int bytes)
  80. {
  81. memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
  82. }
/*
 * Register a tracked MMIO range with the per-device mmio_info hash table.
 *
 * One intel_gvt_mmio_info node is created per 4-byte dword in
 * [offset, offset + size); each node records the handlers and flags that
 * the MMIO emulation path looks up on guest access.
 *
 * Returns 0 on success (including when @device does not match this
 * hardware, in which case the range is silently skipped), -EINVAL for an
 * unaligned offset, or -ENOMEM on allocation failure.
 *
 * NOTE(review): @ro_mask is accepted but not stored anywhere in this
 * function — confirm whether read-only masking is handled elsewhere.
 */
static int new_mmio_info(struct intel_gvt *gvt,
		u32 offset, u32 flags, u32 size,
		u32 addr_mask, u32 ro_mask, u32 device,
		void *read, void *write)
{
	struct intel_gvt_mmio_info *info, *p;
	u32 start, end, i;

	/* Skip registers that do not exist on this hardware generation. */
	if (!intel_gvt_match_device(gvt, device))
		return 0;

	if (WARN_ON(!IS_ALIGNED(offset, 4)))
		return -EINVAL;

	start = offset;
	end = offset + size;

	for (i = start; i < end; i += 4) {
		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		info->offset = i;
		/* Duplicates are logged but still inserted. */
		p = intel_gvt_find_mmio_info(gvt, info->offset);
		if (p)
			gvt_err("dup mmio definition offset %x\n",
				info->offset);
		info->size = size;
		/* The final node may cover fewer than 4 bytes. */
		info->length = (i + 4) < end ? 4 : (end - i);
		info->addr_mask = addr_mask;
		info->device = device;
		/* Fall back to the default MMIO handlers when none given. */
		info->read = read ? read : intel_vgpu_default_mmio_read;
		info->write = write ? write : intel_vgpu_default_mmio_write;
		gvt->mmio.mmio_attribute[info->offset / 4] = flags;
		INIT_HLIST_NODE(&info->node);
		hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
	}
	return 0;
}
  117. static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg)
  118. {
  119. enum intel_engine_id id;
  120. struct intel_engine_cs *engine;
  121. reg &= ~GENMASK(11, 0);
  122. for_each_engine(engine, gvt->dev_priv, id) {
  123. if (engine->mmio_base == reg)
  124. return id;
  125. }
  126. return -1;
  127. }
  128. #define offset_to_fence_num(offset) \
  129. ((offset - i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0))) >> 3)
  130. #define fence_num_to_offset(num) \
  131. (num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0)))
  132. static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
  133. unsigned int fence_num, void *p_data, unsigned int bytes)
  134. {
  135. if (fence_num >= vgpu_fence_sz(vgpu)) {
  136. gvt_err("vgpu%d: found oob fence register access\n",
  137. vgpu->id);
  138. gvt_err("vgpu%d: total fence num %d access fence num %d\n",
  139. vgpu->id, vgpu_fence_sz(vgpu), fence_num);
  140. memset(p_data, 0, bytes);
  141. }
  142. return 0;
  143. }
  144. static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
  145. void *p_data, unsigned int bytes)
  146. {
  147. int ret;
  148. ret = sanitize_fence_mmio_access(vgpu, offset_to_fence_num(off),
  149. p_data, bytes);
  150. if (ret)
  151. return ret;
  152. read_vreg(vgpu, off, p_data, bytes);
  153. return 0;
  154. }
  155. static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
  156. void *p_data, unsigned int bytes)
  157. {
  158. unsigned int fence_num = offset_to_fence_num(off);
  159. int ret;
  160. ret = sanitize_fence_mmio_access(vgpu, fence_num, p_data, bytes);
  161. if (ret)
  162. return ret;
  163. write_vreg(vgpu, off, p_data, bytes);
  164. intel_vgpu_write_fence(vgpu, fence_num,
  165. vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
  166. return 0;
  167. }
  168. #define CALC_MODE_MASK_REG(old, new) \
  169. (((new) & GENMASK(31, 16)) \
  170. | ((((old) & GENMASK(15, 0)) & ~((new) >> 16)) \
  171. | ((new) & ((new) >> 16))))
  172. static int mul_force_wake_write(struct intel_vgpu *vgpu,
  173. unsigned int offset, void *p_data, unsigned int bytes)
  174. {
  175. u32 old, new;
  176. uint32_t ack_reg_offset;
  177. old = vgpu_vreg(vgpu, offset);
  178. new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
  179. if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
  180. switch (offset) {
  181. case FORCEWAKE_RENDER_GEN9_REG:
  182. ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
  183. break;
  184. case FORCEWAKE_BLITTER_GEN9_REG:
  185. ack_reg_offset = FORCEWAKE_ACK_BLITTER_GEN9_REG;
  186. break;
  187. case FORCEWAKE_MEDIA_GEN9_REG:
  188. ack_reg_offset = FORCEWAKE_ACK_MEDIA_GEN9_REG;
  189. break;
  190. default:
  191. /*should not hit here*/
  192. gvt_err("invalid forcewake offset 0x%x\n", offset);
  193. return 1;
  194. }
  195. } else {
  196. ack_reg_offset = FORCEWAKE_ACK_HSW_REG;
  197. }
  198. vgpu_vreg(vgpu, offset) = new;
  199. vgpu_vreg(vgpu, ack_reg_offset) = (new & GENMASK(15, 0));
  200. return 0;
  201. }
/*
 * Emulate a guest-requested device reset.
 *
 * @bitmap selects the engines to reset; the magic value 0xff means a
 * full GPU reset, which additionally tears down and rebuilds the vGPU's
 * GTT, MMIO state, and PVINFO page.
 *
 * Called with vgpu->gvt->lock held; the lock is temporarily dropped in
 * two places (see below), so callers must not rely on state that other
 * lock holders could change across this call.
 */
static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes, unsigned long bitmap)
{
	struct intel_gvt_workload_scheduler *scheduler =
			&vgpu->gvt->scheduler;

	vgpu->resetting = true;

	intel_vgpu_stop_schedule(vgpu);
	/*
	 * The current_vgpu will set to NULL after stopping the
	 * scheduler when the reset is triggered by current vgpu.
	 */
	if (scheduler->current_vgpu == NULL) {
		/* Drop the lock so the scheduler can drain this vGPU. */
		mutex_unlock(&vgpu->gvt->lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&vgpu->gvt->lock);
	}

	intel_vgpu_reset_execlist(vgpu, bitmap);

	/* full GPU reset */
	if (bitmap == 0xff) {
		/* GTT teardown may sleep; release the lock around it. */
		mutex_unlock(&vgpu->gvt->lock);
		intel_vgpu_clean_gtt(vgpu);
		mutex_lock(&vgpu->gvt->lock);
		setup_vgpu_mmio(vgpu);
		populate_pvinfo_page(vgpu);
		intel_vgpu_init_gtt(vgpu);
	}

	vgpu->resetting = false;

	return 0;
}
/*
 * Write handler for the graphics reset (GDRST) register.
 *
 * Decodes the guest's reset request bits into an engine bitmap and
 * forwards it to handle_device_reset().  A full-GPU request sets the
 * bitmap to the magic 0xff that handle_device_reset() treats as "reset
 * everything".
 */
static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 data;
	u64 bitmap = 0;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	if (data & GEN6_GRDOM_FULL) {
		gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
		bitmap = 0xff;
	}
	if (data & GEN6_GRDOM_RENDER) {
		gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
		bitmap |= (1 << RCS);
	}
	if (data & GEN6_GRDOM_MEDIA) {
		gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
		bitmap |= (1 << VCS);
	}
	if (data & GEN6_GRDOM_BLT) {
		gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
		bitmap |= (1 << BCS);
	}
	if (data & GEN6_GRDOM_VECS) {
		gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
		bitmap |= (1 << VECS);
	}
	if (data & GEN8_GRDOM_MEDIA2) {
		gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
		/* Only honor VCS2 on parts that actually have a second BSD. */
		if (HAS_BSD2(vgpu->gvt->dev_priv))
			bitmap |= (1 << VCS2);
	}
	return handle_device_reset(vgpu, offset, p_data, bytes, bitmap);
}
/* Delegate GMBUS (I2C) register reads to the shared GVT i2c emulation. */
static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	return intel_gvt_i2c_handle_gmbus_read(vgpu, offset, p_data, bytes);
}
/* Delegate GMBUS (I2C) register writes to the shared GVT i2c emulation. */
static int gmbus_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	return intel_gvt_i2c_handle_gmbus_write(vgpu, offset, p_data, bytes);
}
  275. static int pch_pp_control_mmio_write(struct intel_vgpu *vgpu,
  276. unsigned int offset, void *p_data, unsigned int bytes)
  277. {
  278. write_vreg(vgpu, offset, p_data, bytes);
  279. if (vgpu_vreg(vgpu, offset) & PANEL_POWER_ON) {
  280. vgpu_vreg(vgpu, PCH_PP_STATUS) |= PP_ON;
  281. vgpu_vreg(vgpu, PCH_PP_STATUS) |= PP_SEQUENCE_STATE_ON_IDLE;
  282. vgpu_vreg(vgpu, PCH_PP_STATUS) &= ~PP_SEQUENCE_POWER_DOWN;
  283. vgpu_vreg(vgpu, PCH_PP_STATUS) &= ~PP_CYCLE_DELAY_ACTIVE;
  284. } else
  285. vgpu_vreg(vgpu, PCH_PP_STATUS) &=
  286. ~(PP_ON | PP_SEQUENCE_POWER_DOWN
  287. | PP_CYCLE_DELAY_ACTIVE);
  288. return 0;
  289. }
  290. static int transconf_mmio_write(struct intel_vgpu *vgpu,
  291. unsigned int offset, void *p_data, unsigned int bytes)
  292. {
  293. write_vreg(vgpu, offset, p_data, bytes);
  294. if (vgpu_vreg(vgpu, offset) & TRANS_ENABLE)
  295. vgpu_vreg(vgpu, offset) |= TRANS_STATE_ENABLE;
  296. else
  297. vgpu_vreg(vgpu, offset) &= ~TRANS_STATE_ENABLE;
  298. return 0;
  299. }
  300. static int lcpll_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
  301. void *p_data, unsigned int bytes)
  302. {
  303. write_vreg(vgpu, offset, p_data, bytes);
  304. if (vgpu_vreg(vgpu, offset) & LCPLL_PLL_DISABLE)
  305. vgpu_vreg(vgpu, offset) &= ~LCPLL_PLL_LOCK;
  306. else
  307. vgpu_vreg(vgpu, offset) |= LCPLL_PLL_LOCK;
  308. if (vgpu_vreg(vgpu, offset) & LCPLL_CD_SOURCE_FCLK)
  309. vgpu_vreg(vgpu, offset) |= LCPLL_CD_SOURCE_FCLK_DONE;
  310. else
  311. vgpu_vreg(vgpu, offset) &= ~LCPLL_CD_SOURCE_FCLK_DONE;
  312. return 0;
  313. }
/*
 * Constant-value stub for a display register read: always reports bit 17
 * set.  Presumably a ready/done status bit the guest polls — confirm
 * against the MMIO table entry that routes to this handler.
 */
static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	*(u32 *)p_data = (1 << 17);
	return 0;
}
/*
 * Constant-value stub for a display register read: always returns 3.
 * NOTE(review): meaning of the constant is not derivable from this file;
 * check the register this handler is bound to in the MMIO table.
 */
static int dpy_reg_mmio_read_2(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	*(u32 *)p_data = 3;
	return 0;
}
/*
 * Constant-value stub for a display register read: always returns
 * 0x2f << 16.  NOTE(review): constant's meaning depends on the bound
 * register — verify against the MMIO table entry.
 */
static int dpy_reg_mmio_read_3(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	*(u32 *)p_data = (0x2f << 16);
	return 0;
}
  332. static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
  333. void *p_data, unsigned int bytes)
  334. {
  335. u32 data;
  336. write_vreg(vgpu, offset, p_data, bytes);
  337. data = vgpu_vreg(vgpu, offset);
  338. if (data & PIPECONF_ENABLE)
  339. vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE;
  340. else
  341. vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE;
  342. intel_gvt_check_vblank_emulation(vgpu->gvt);
  343. return 0;
  344. }
  345. static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
  346. void *p_data, unsigned int bytes)
  347. {
  348. write_vreg(vgpu, offset, p_data, bytes);
  349. if (vgpu_vreg(vgpu, offset) & DDI_BUF_CTL_ENABLE) {
  350. vgpu_vreg(vgpu, offset) &= ~DDI_BUF_IS_IDLE;
  351. } else {
  352. vgpu_vreg(vgpu, offset) |= DDI_BUF_IS_IDLE;
  353. if (offset == i915_mmio_reg_offset(DDI_BUF_CTL(PORT_E)))
  354. vgpu_vreg(vgpu, DP_TP_STATUS(PORT_E))
  355. &= ~DP_TP_STATUS_AUTOTRAIN_DONE;
  356. }
  357. return 0;
  358. }
/*
 * FDI_RX_IIR is write-one-to-clear: each 1 bit the guest writes clears
 * the corresponding latched interrupt status bit in the vreg.
 */
static int fdi_rx_iir_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	vgpu_vreg(vgpu, offset) &= ~*(u32 *)p_data;
	return 0;
}
  365. #define FDI_LINK_TRAIN_PATTERN1 0
  366. #define FDI_LINK_TRAIN_PATTERN2 1
  367. static int fdi_auto_training_started(struct intel_vgpu *vgpu)
  368. {
  369. u32 ddi_buf_ctl = vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_E));
  370. u32 rx_ctl = vgpu_vreg(vgpu, _FDI_RXA_CTL);
  371. u32 tx_ctl = vgpu_vreg(vgpu, DP_TP_CTL(PORT_E));
  372. if ((ddi_buf_ctl & DDI_BUF_CTL_ENABLE) &&
  373. (rx_ctl & FDI_RX_ENABLE) &&
  374. (rx_ctl & FDI_AUTO_TRAINING) &&
  375. (tx_ctl & DP_TP_CTL_ENABLE) &&
  376. (tx_ctl & DP_TP_CTL_FDI_AUTOTRAIN))
  377. return 1;
  378. else
  379. return 0;
  380. }
  381. static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
  382. enum pipe pipe, unsigned int train_pattern)
  383. {
  384. i915_reg_t fdi_rx_imr, fdi_tx_ctl, fdi_rx_ctl;
  385. unsigned int fdi_rx_check_bits, fdi_tx_check_bits;
  386. unsigned int fdi_rx_train_bits, fdi_tx_train_bits;
  387. unsigned int fdi_iir_check_bits;
  388. fdi_rx_imr = FDI_RX_IMR(pipe);
  389. fdi_tx_ctl = FDI_TX_CTL(pipe);
  390. fdi_rx_ctl = FDI_RX_CTL(pipe);
  391. if (train_pattern == FDI_LINK_TRAIN_PATTERN1) {
  392. fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_1_CPT;
  393. fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_1;
  394. fdi_iir_check_bits = FDI_RX_BIT_LOCK;
  395. } else if (train_pattern == FDI_LINK_TRAIN_PATTERN2) {
  396. fdi_rx_train_bits = FDI_LINK_TRAIN_PATTERN_2_CPT;
  397. fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
  398. fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK;
  399. } else {
  400. gvt_err("Invalid train pattern %d\n", train_pattern);
  401. return -EINVAL;
  402. }
  403. fdi_rx_check_bits = FDI_RX_ENABLE | fdi_rx_train_bits;
  404. fdi_tx_check_bits = FDI_TX_ENABLE | fdi_tx_train_bits;
  405. /* If imr bit has been masked */
  406. if (vgpu_vreg(vgpu, fdi_rx_imr) & fdi_iir_check_bits)
  407. return 0;
  408. if (((vgpu_vreg(vgpu, fdi_tx_ctl) & fdi_tx_check_bits)
  409. == fdi_tx_check_bits)
  410. && ((vgpu_vreg(vgpu, fdi_rx_ctl) & fdi_rx_check_bits)
  411. == fdi_rx_check_bits))
  412. return 1;
  413. else
  414. return 0;
  415. }
  416. #define INVALID_INDEX (~0U)
  417. static unsigned int calc_index(unsigned int offset, unsigned int start,
  418. unsigned int next, unsigned int end, i915_reg_t i915_end)
  419. {
  420. unsigned int range = next - start;
  421. if (!end)
  422. end = i915_mmio_reg_offset(i915_end);
  423. if (offset < start || offset > end)
  424. return INVALID_INDEX;
  425. offset -= start;
  426. return offset / range;
  427. }
  428. #define FDI_RX_CTL_TO_PIPE(offset) \
  429. calc_index(offset, _FDI_RXA_CTL, _FDI_RXB_CTL, 0, FDI_RX_CTL(PIPE_C))
  430. #define FDI_TX_CTL_TO_PIPE(offset) \
  431. calc_index(offset, _FDI_TXA_CTL, _FDI_TXB_CTL, 0, FDI_TX_CTL(PIPE_C))
  432. #define FDI_RX_IMR_TO_PIPE(offset) \
  433. calc_index(offset, _FDI_RXA_IMR, _FDI_RXB_IMR, 0, FDI_RX_IMR(PIPE_C))
  434. static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
  435. unsigned int offset, void *p_data, unsigned int bytes)
  436. {
  437. i915_reg_t fdi_rx_iir;
  438. unsigned int index;
  439. int ret;
  440. if (FDI_RX_CTL_TO_PIPE(offset) != INVALID_INDEX)
  441. index = FDI_RX_CTL_TO_PIPE(offset);
  442. else if (FDI_TX_CTL_TO_PIPE(offset) != INVALID_INDEX)
  443. index = FDI_TX_CTL_TO_PIPE(offset);
  444. else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
  445. index = FDI_RX_IMR_TO_PIPE(offset);
  446. else {
  447. gvt_err("Unsupport registers %x\n", offset);
  448. return -EINVAL;
  449. }
  450. write_vreg(vgpu, offset, p_data, bytes);
  451. fdi_rx_iir = FDI_RX_IIR(index);
  452. ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN1);
  453. if (ret < 0)
  454. return ret;
  455. if (ret)
  456. vgpu_vreg(vgpu, fdi_rx_iir) |= FDI_RX_BIT_LOCK;
  457. ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN2);
  458. if (ret < 0)
  459. return ret;
  460. if (ret)
  461. vgpu_vreg(vgpu, fdi_rx_iir) |= FDI_RX_SYMBOL_LOCK;
  462. if (offset == _FDI_RXA_CTL)
  463. if (fdi_auto_training_started(vgpu))
  464. vgpu_vreg(vgpu, DP_TP_STATUS(PORT_E)) |=
  465. DP_TP_STATUS_AUTOTRAIN_DONE;
  466. return 0;
  467. }
  468. #define DP_TP_CTL_TO_PORT(offset) \
  469. calc_index(offset, _DP_TP_CTL_A, _DP_TP_CTL_B, 0, DP_TP_CTL(PORT_E))
  470. static int dp_tp_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
  471. void *p_data, unsigned int bytes)
  472. {
  473. i915_reg_t status_reg;
  474. unsigned int index;
  475. u32 data;
  476. write_vreg(vgpu, offset, p_data, bytes);
  477. index = DP_TP_CTL_TO_PORT(offset);
  478. data = (vgpu_vreg(vgpu, offset) & GENMASK(10, 8)) >> 8;
  479. if (data == 0x2) {
  480. status_reg = DP_TP_STATUS(index);
  481. vgpu_vreg(vgpu, status_reg) |= (1 << 25);
  482. }
  483. return 0;
  484. }
/*
 * DP_TP_STATUS write: bits 27:26 and 24 are "sticky" write-one-to-clear
 * status bits; all other bits take the written value directly.
 */
static int dp_tp_status_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	u32 reg_val;
	u32 sticky_mask;

	reg_val = *((u32 *)p_data);
	sticky_mask = GENMASK(27, 26) | (1 << 24);

	/* Non-sticky bits take the new value; sticky bits keep theirs... */
	vgpu_vreg(vgpu, offset) = (reg_val & ~sticky_mask) |
		(vgpu_vreg(vgpu, offset) & sticky_mask);
	/* ...then any sticky bit written as 1 is cleared (W1C). */
	vgpu_vreg(vgpu, offset) &= ~(reg_val & sticky_mask);
	return 0;
}
  497. static int pch_adpa_mmio_write(struct intel_vgpu *vgpu,
  498. unsigned int offset, void *p_data, unsigned int bytes)
  499. {
  500. u32 data;
  501. write_vreg(vgpu, offset, p_data, bytes);
  502. data = vgpu_vreg(vgpu, offset);
  503. if (data & ADPA_CRT_HOTPLUG_FORCE_TRIGGER)
  504. vgpu_vreg(vgpu, offset) &= ~ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
  505. return 0;
  506. }
  507. static int south_chicken2_mmio_write(struct intel_vgpu *vgpu,
  508. unsigned int offset, void *p_data, unsigned int bytes)
  509. {
  510. u32 data;
  511. write_vreg(vgpu, offset, p_data, bytes);
  512. data = vgpu_vreg(vgpu, offset);
  513. if (data & FDI_MPHY_IOSFSB_RESET_CTL)
  514. vgpu_vreg(vgpu, offset) |= FDI_MPHY_IOSFSB_RESET_STATUS;
  515. else
  516. vgpu_vreg(vgpu, offset) &= ~FDI_MPHY_IOSFSB_RESET_STATUS;
  517. return 0;
  518. }
  519. #define DSPSURF_TO_PIPE(offset) \
  520. calc_index(offset, _DSPASURF, _DSPBSURF, 0, DSPSURF(PIPE_C))
  521. static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
  522. void *p_data, unsigned int bytes)
  523. {
  524. struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
  525. unsigned int index = DSPSURF_TO_PIPE(offset);
  526. i915_reg_t surflive_reg = DSPSURFLIVE(index);
  527. int flip_event[] = {
  528. [PIPE_A] = PRIMARY_A_FLIP_DONE,
  529. [PIPE_B] = PRIMARY_B_FLIP_DONE,
  530. [PIPE_C] = PRIMARY_C_FLIP_DONE,
  531. };
  532. write_vreg(vgpu, offset, p_data, bytes);
  533. vgpu_vreg(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
  534. set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
  535. return 0;
  536. }
  537. #define SPRSURF_TO_PIPE(offset) \
  538. calc_index(offset, _SPRA_SURF, _SPRB_SURF, 0, SPRSURF(PIPE_C))
  539. static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
  540. void *p_data, unsigned int bytes)
  541. {
  542. unsigned int index = SPRSURF_TO_PIPE(offset);
  543. i915_reg_t surflive_reg = SPRSURFLIVE(index);
  544. int flip_event[] = {
  545. [PIPE_A] = SPRITE_A_FLIP_DONE,
  546. [PIPE_B] = SPRITE_B_FLIP_DONE,
  547. [PIPE_C] = SPRITE_C_FLIP_DONE,
  548. };
  549. write_vreg(vgpu, offset, p_data, bytes);
  550. vgpu_vreg(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
  551. set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
  552. return 0;
  553. }
  554. static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
  555. unsigned int reg)
  556. {
  557. struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
  558. enum intel_gvt_event_type event;
  559. if (reg == _DPA_AUX_CH_CTL)
  560. event = AUX_CHANNEL_A;
  561. else if (reg == _PCH_DPB_AUX_CH_CTL || reg == _DPB_AUX_CH_CTL)
  562. event = AUX_CHANNEL_B;
  563. else if (reg == _PCH_DPC_AUX_CH_CTL || reg == _DPC_AUX_CH_CTL)
  564. event = AUX_CHANNEL_C;
  565. else if (reg == _PCH_DPD_AUX_CH_CTL || reg == _DPD_AUX_CH_CTL)
  566. event = AUX_CHANNEL_D;
  567. else {
  568. WARN_ON(true);
  569. return -EINVAL;
  570. }
  571. intel_vgpu_trigger_virtual_event(vgpu, event);
  572. return 0;
  573. }
static int dp_aux_ch_ctl_trans_done(struct intel_vgpu *vgpu, u32 value,
		unsigned int reg, int len, bool data_valid)
{
	/*
	 * Complete an emulated AUX transaction: report done/not-busy in the
	 * AUX_CH_CTL register, flag a timeout error when @data_valid is
	 * false, record the reply message size @len, and raise the AUX
	 * interrupt if the guest enabled it.
	 */
	/* mark transaction done */
	value |= DP_AUX_CH_CTL_DONE;
	value &= ~DP_AUX_CH_CTL_SEND_BUSY;
	value &= ~DP_AUX_CH_CTL_RECEIVE_ERROR;

	if (data_valid)
		value &= ~DP_AUX_CH_CTL_TIME_OUT_ERROR;
	else
		value |= DP_AUX_CH_CTL_TIME_OUT_ERROR;

	/* message size */
	/* NOTE(review): only 4 bits are cleared here, while @len is ORed in
	 * unmasked — confirm the message-size field width in AUX_CH_CTL
	 * (a 5-bit field would leave a stale bit for len > 15). */
	value &= ~(0xf << 20);
	value |= (len << 20);
	vgpu_vreg(vgpu, reg) = value;

	if (value & DP_AUX_CH_CTL_INTERRUPT)
		return trigger_aux_channel_interrupt(vgpu, reg);
	return 0;
}
static void dp_aux_ch_ctl_link_training(struct intel_vgpu_dpcd_data *dpcd,
		uint8_t t)
{
	/*
	 * Emulate sink-side link-training progress. When the guest writes a
	 * training pattern to DPCD_TRAINING_PATTERN_SET, immediately report
	 * the corresponding success bits in the virtual DPCD status bytes,
	 * so link training on the virtual monitor always succeeds.
	 */
	if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == DPCD_TRAINING_PATTERN_1) {
		/* training pattern 1 for CR */
		/* set LANE0_CR_DONE, LANE1_CR_DONE */
		dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_CR_DONE;
		/* set LANE2_CR_DONE, LANE3_CR_DONE */
		dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_CR_DONE;
	} else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
			DPCD_TRAINING_PATTERN_2) {
		/* training pattern 2 for EQ */
		/* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane0_1 */
		dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_EQ_DONE;
		dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_SYMBOL_LOCKED;
		/* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane2_3 */
		dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_EQ_DONE;
		dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_SYMBOL_LOCKED;
		/* set INTERLANE_ALIGN_DONE */
		dpcd->data[DPCD_LANE_ALIGN_STATUS_UPDATED] |=
			DPCD_INTERLANE_ALIGN_DONE;
	} else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
			DPCD_LINK_TRAINING_DISABLED) {
		/* finish link training */
		/* set sink status as synchronized */
		dpcd->data[DPCD_SINK_STATUS] = DPCD_SINK_IN_SYNC;
	}
}
/* AUX_CH_CTL register offset for DP port @dp on HSW/BDW: port 0 (DDI A)
 * is at 0x64010; the others are spaced 0x100 apart in the PCH range. */
#define _REG_HSW_DP_AUX_CH_CTL(dp) \
	((dp) ? (_PCH_DPB_AUX_CH_CTL + ((dp)-1)*0x100) : 0x64010)
/* AUX_CH_CTL register offset for DP port @dp on SKL+. */
#define _REG_SKL_DP_AUX_CH_CTL(dp) (0x64010 + (dp) * 0x100)
/* Extract the DP AUX port index encoded in bits [11:8] of an offset. */
#define OFFSET_TO_DP_AUX_PORT(offset) (((offset) & 0xF00) >> 8)
/* A port index is valid if it lies within [PORT_A, I915_MAX_PORTS). */
#define dpy_is_valid_port(port) \
	(((port) >= PORT_A) && ((port) < I915_MAX_PORTS))
static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	/*
	 * Emulate a guest write to a DP AUX channel control register.
	 *
	 * The AUX message header is decoded from the DATA1 register. Native
	 * DPCD reads and writes are served from/into the vGPU's virtual DPCD
	 * table; any other opcode is handed to the i2c-over-AUX emulation.
	 * Returns 0 in all cases except a request exceeding AUX_BURST_SIZE,
	 * which returns -EINVAL after a WARN.
	 */
	struct intel_vgpu_display *display = &vgpu->display;
	int msg, addr, ctrl, op, len;
	int port_index = OFFSET_TO_DP_AUX_PORT(offset);
	struct intel_vgpu_dpcd_data *dpcd = NULL;
	struct intel_vgpu_port *port = NULL;
	u32 data;

	if (!dpy_is_valid_port(port_index)) {
		gvt_err("GVT(%d): Unsupported DP port access!\n", vgpu->id);
		return 0;
	}

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	/* only the platform's CTL register starts a transaction; writes
	 * landing elsewhere (e.g. the data registers) are just stored */
	if (IS_SKYLAKE(vgpu->gvt->dev_priv) &&
	    offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
		/* SKL DPB/C/D aux ctl register changed */
		return 0;
	} else if (IS_BROADWELL(vgpu->gvt->dev_priv) &&
		   offset != _REG_HSW_DP_AUX_CH_CTL(port_index)) {
		/* write to the data registers */
		return 0;
	}

	if (!(data & DP_AUX_CH_CTL_SEND_BUSY)) {
		/* just want to clear the sticky bits */
		vgpu_vreg(vgpu, offset) = 0;
		return 0;
	}

	port = &display->ports[port_index];
	dpcd = port->dpcd;

	/* read out message from DATA1 register */
	msg = vgpu_vreg(vgpu, offset + 4);
	/* decode the AUX header: address, command byte, length */
	addr = (msg >> 8) & 0xffff;
	ctrl = (msg >> 24) & 0xff;
	len = msg & 0xff;
	/* high nibble of the command byte is the AUX opcode */
	op = ctrl >> 4;

	if (op == GVT_AUX_NATIVE_WRITE) {
		int t;
		uint8_t buf[16];

		if ((addr + len + 1) >= DPCD_SIZE) {
			/*
			 * Write request exceeds what we supported,
			 * DCPD spec: When a Source Device is writing a DPCD
			 * address not supported by the Sink Device, the Sink
			 * Device shall reply with AUX NACK and “M” equal to
			 * zero.
			 */
			/* NAK the write */
			vgpu_vreg(vgpu, offset + 4) = AUX_NATIVE_REPLY_NAK;
			dp_aux_ch_ctl_trans_done(vgpu, data, offset, 2, true);
			return 0;
		}

		/*
		 * Write request format: (command + address) occupies
		 * 3 bytes, followed by (len + 1) bytes of data.
		 */
		if (WARN_ON((len + 4) > AUX_BURST_SIZE))
			return -EINVAL;

		/* unpack data from vreg to buf */
		for (t = 0; t < 4; t++) {
			/* data registers hold bytes big-endian per dword */
			u32 r = vgpu_vreg(vgpu, offset + 8 + t * 4);

			buf[t * 4] = (r >> 24) & 0xff;
			buf[t * 4 + 1] = (r >> 16) & 0xff;
			buf[t * 4 + 2] = (r >> 8) & 0xff;
			buf[t * 4 + 3] = r & 0xff;
		}

		/* write to virtual DPCD */
		if (dpcd && dpcd->data_valid) {
			for (t = 0; t <= len; t++) {
				int p = addr + t;

				dpcd->data[p] = buf[t];
				/* check for link training */
				if (p == DPCD_TRAINING_PATTERN_SET)
					dp_aux_ch_ctl_link_training(dpcd,
							buf[t]);
			}
		}

		/* ACK the write */
		vgpu_vreg(vgpu, offset + 4) = 0;
		dp_aux_ch_ctl_trans_done(vgpu, data, offset, 1,
				dpcd && dpcd->data_valid);
		return 0;
	}

	if (op == GVT_AUX_NATIVE_READ) {
		int idx, i, ret = 0;

		if ((addr + len + 1) >= DPCD_SIZE) {
			/*
			 * read request exceeds what we supported
			 * DPCD spec: A Sink Device receiving a Native AUX CH
			 * read request for an unsupported DPCD address must
			 * reply with an AUX ACK and read data set equal to
			 * zero instead of replying with AUX NACK.
			 */
			/* ACK the READ*/
			vgpu_vreg(vgpu, offset + 4) = 0;
			vgpu_vreg(vgpu, offset + 8) = 0;
			vgpu_vreg(vgpu, offset + 12) = 0;
			vgpu_vreg(vgpu, offset + 16) = 0;
			vgpu_vreg(vgpu, offset + 20) = 0;

			dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
					true);
			return 0;
		}

		for (idx = 1; idx <= 5; idx++) {
			/* clear the data registers */
			vgpu_vreg(vgpu, offset + 4 * idx) = 0;
		}

		/*
		 * Read reply format: ACK (1 byte) plus (len + 1) bytes of data.
		 */
		if (WARN_ON((len + 2) > AUX_BURST_SIZE))
			return -EINVAL;

		/* read from virtual DPCD to vreg */
		/* first 4 bytes: [ACK][addr][addr+1][addr+2] */
		if (dpcd && dpcd->data_valid) {
			for (i = 1; i <= (len + 1); i++) {
				int t;

				t = dpcd->data[addr + i - 1];
				/* pack bytes big-endian into 32-bit words */
				t <<= (24 - 8 * (i % 4));
				ret |= t;
				/* flush a completed word, or the tail */
				if ((i % 4 == 3) || (i == (len + 1))) {
					vgpu_vreg(vgpu, offset +
							(i / 4 + 1) * 4) = ret;
					ret = 0;
				}
			}
		}

		dp_aux_ch_ctl_trans_done(vgpu, data, offset, len + 2,
				dpcd && dpcd->data_valid);
		return 0;
	}

	/* i2c transaction starts */
	intel_gvt_i2c_handle_aux_ch_write(vgpu, port_index, offset, p_data);

	if (data & DP_AUX_CH_CTL_INTERRUPT)
		trigger_aux_channel_interrupt(vgpu, offset);
	return 0;
}
  765. static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
  766. void *p_data, unsigned int bytes)
  767. {
  768. bool vga_disable;
  769. write_vreg(vgpu, offset, p_data, bytes);
  770. vga_disable = vgpu_vreg(vgpu, offset) & VGA_DISP_DISABLE;
  771. gvt_dbg_core("vgpu%d: %s VGA mode\n", vgpu->id,
  772. vga_disable ? "Disable" : "Enable");
  773. return 0;
  774. }
  775. static u32 read_virtual_sbi_register(struct intel_vgpu *vgpu,
  776. unsigned int sbi_offset)
  777. {
  778. struct intel_vgpu_display *display = &vgpu->display;
  779. int num = display->sbi.number;
  780. int i;
  781. for (i = 0; i < num; ++i)
  782. if (display->sbi.registers[i].offset == sbi_offset)
  783. break;
  784. if (i == num)
  785. return 0;
  786. return display->sbi.registers[i].value;
  787. }
  788. static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
  789. unsigned int offset, u32 value)
  790. {
  791. struct intel_vgpu_display *display = &vgpu->display;
  792. int num = display->sbi.number;
  793. int i;
  794. for (i = 0; i < num; ++i) {
  795. if (display->sbi.registers[i].offset == offset)
  796. break;
  797. }
  798. if (i == num) {
  799. if (num == SBI_REG_MAX) {
  800. gvt_err("vgpu%d: SBI caching meets maximum limits\n",
  801. vgpu->id);
  802. return;
  803. }
  804. display->sbi.number++;
  805. }
  806. display->sbi.registers[i].offset = offset;
  807. display->sbi.registers[i].value = value;
  808. }
static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	/*
	 * Serve a guest read of SBI_DATA. If the opcode last written to
	 * SBI_CTL_STAT was CRRD (sideband read), refresh the vreg from the
	 * virtual SBI cache first, then complete the read from the vreg.
	 * Always returns 0.
	 */
	if (((vgpu_vreg(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
				SBI_OPCODE_SHIFT) == SBI_CMD_CRRD) {
		unsigned int sbi_offset = (vgpu_vreg(vgpu, SBI_ADDR) &
				SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
		vgpu_vreg(vgpu, offset) = read_virtual_sbi_register(vgpu,
				sbi_offset);
	}
	read_vreg(vgpu, offset, p_data, bytes);
	return 0;
}
  822. static bool sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
  823. void *p_data, unsigned int bytes)
  824. {
  825. u32 data;
  826. write_vreg(vgpu, offset, p_data, bytes);
  827. data = vgpu_vreg(vgpu, offset);
  828. data &= ~(SBI_STAT_MASK << SBI_STAT_SHIFT);
  829. data |= SBI_READY;
  830. data &= ~(SBI_RESPONSE_MASK << SBI_RESPONSE_SHIFT);
  831. data |= SBI_RESPONSE_SUCCESS;
  832. vgpu_vreg(vgpu, offset) = data;
  833. if (((vgpu_vreg(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
  834. SBI_OPCODE_SHIFT) == SBI_CMD_CRWR) {
  835. unsigned int sbi_offset = (vgpu_vreg(vgpu, SBI_ADDR) &
  836. SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
  837. write_virtual_sbi_register(vgpu, sbi_offset,
  838. vgpu_vreg(vgpu, SBI_DATA));
  839. }
  840. return 0;
  841. }
/* MMIO offset of field @x of struct vgt_if within the PVINFO page. */
#define _vgtif_reg(x) \
	(VGT_PVINFO_PAGE + offsetof(struct vgt_if, x))
static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	/*
	 * Serve guest reads of the PVINFO page from the vregs, validating
	 * that the access lies within a known field range and does not run
	 * past its end. Invalid reads are only logged — the data already
	 * copied from the vreg is still returned, and the handler always
	 * reports success.
	 */
	bool invalid_read = false;

	read_vreg(vgpu, offset, p_data, bytes);

	switch (offset) {
	case _vgtif_reg(magic) ... _vgtif_reg(vgt_id):
		/* reject reads running past the end of the field range */
		if (offset + bytes > _vgtif_reg(vgt_id) + 4)
			invalid_read = true;
		break;
	case _vgtif_reg(avail_rs.mappable_gmadr.base) ...
		_vgtif_reg(avail_rs.fence_num):
		if (offset + bytes >
			_vgtif_reg(avail_rs.fence_num) + 4)
			invalid_read = true;
		break;
	case 0x78010:	/* vgt_caps */
	case 0x7881c:
		break;
	default:
		invalid_read = true;
		break;
	}

	if (invalid_read)
		gvt_err("invalid pvinfo read: [%x:%x] = %x\n",
				offset, bytes, *(u32 *)p_data);
	return 0;
}
  872. static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
  873. {
  874. int ret = 0;
  875. switch (notification) {
  876. case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
  877. ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu, 3);
  878. break;
  879. case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY:
  880. ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 3);
  881. break;
  882. case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
  883. ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu, 4);
  884. break;
  885. case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY:
  886. ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 4);
  887. break;
  888. case VGT_G2V_EXECLIST_CONTEXT_CREATE:
  889. case VGT_G2V_EXECLIST_CONTEXT_DESTROY:
  890. case 1: /* Remove this in guest driver. */
  891. break;
  892. default:
  893. gvt_err("Invalid PV notification %d\n", notification);
  894. }
  895. return ret;
  896. }
  897. static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
  898. {
  899. struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
  900. struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
  901. char *env[3] = {NULL, NULL, NULL};
  902. char vmid_str[20];
  903. char display_ready_str[20];
  904. snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d\n", ready);
  905. env[0] = display_ready_str;
  906. snprintf(vmid_str, 20, "VMID=%d", vgpu->id);
  907. env[1] = vmid_str;
  908. return kobject_uevent_env(kobj, KOBJ_ADD, env);
  909. }
static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	/*
	 * Handle guest writes to the PVINFO page. A display_ready write
	 * fans out a uevent to host userspace; g2v_notify dispatches a PV
	 * notification; the remaining recognized offsets are accepted
	 * silently. Unknown offsets are logged. Always returns 0.
	 */
	u32 data;
	int ret;

	write_vreg(vgpu, offset, p_data, bytes);
	data = vgpu_vreg(vgpu, offset);

	switch (offset) {
	case _vgtif_reg(display_ready):
		send_display_ready_uevent(vgpu, data ? 1 : 0);
		break;
	case _vgtif_reg(g2v_notify):
		ret = handle_g2v_notification(vgpu, data);
		/* NOTE(review): ret is ignored here — errors from the
		 * notification handler are silently dropped. */
		break;
	/* add xhot and yhot to handled list to avoid error log */
	case 0x78830:
	case 0x78834:
	case _vgtif_reg(pdp[0].lo):
	case _vgtif_reg(pdp[0].hi):
	case _vgtif_reg(pdp[1].lo):
	case _vgtif_reg(pdp[1].hi):
	case _vgtif_reg(pdp[2].lo):
	case _vgtif_reg(pdp[2].hi):
	case _vgtif_reg(pdp[3].lo):
	case _vgtif_reg(pdp[3].hi):
	case _vgtif_reg(execlist_context_descriptor_lo):
	case _vgtif_reg(execlist_context_descriptor_hi):
		break;
	default:
		gvt_err("invalid pvinfo write offset %x bytes %x data %x\n",
				offset, bytes, data);
		break;
	}

	return 0;
}
  945. static int pf_write(struct intel_vgpu *vgpu,
  946. unsigned int offset, void *p_data, unsigned int bytes)
  947. {
  948. u32 val = *(u32 *)p_data;
  949. if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL ||
  950. offset == _PS_1B_CTRL || offset == _PS_2B_CTRL ||
  951. offset == _PS_1C_CTRL) && (val & PS_PLANE_SEL_MASK) != 0) {
  952. WARN_ONCE(true, "VM(%d): guest is trying to scaling a plane\n",
  953. vgpu->id);
  954. return 0;
  955. }
  956. return intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);
  957. }
  958. static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
  959. unsigned int offset, void *p_data, unsigned int bytes)
  960. {
  961. write_vreg(vgpu, offset, p_data, bytes);
  962. if (vgpu_vreg(vgpu, offset) & HSW_PWR_WELL_ENABLE_REQUEST)
  963. vgpu_vreg(vgpu, offset) |= HSW_PWR_WELL_STATE_ENABLED;
  964. else
  965. vgpu_vreg(vgpu, offset) &= ~HSW_PWR_WELL_STATE_ENABLED;
  966. return 0;
  967. }
  968. static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
  969. unsigned int offset, void *p_data, unsigned int bytes)
  970. {
  971. write_vreg(vgpu, offset, p_data, bytes);
  972. if (vgpu_vreg(vgpu, offset) & FPGA_DBG_RM_NOCLAIM)
  973. vgpu_vreg(vgpu, offset) &= ~FPGA_DBG_RM_NOCLAIM;
  974. return 0;
  975. }
  976. static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
  977. void *p_data, unsigned int bytes)
  978. {
  979. u32 mode;
  980. write_vreg(vgpu, offset, p_data, bytes);
  981. mode = vgpu_vreg(vgpu, offset);
  982. if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
  983. WARN_ONCE(1, "VM(%d): iGVT-g doesn't supporte GuC\n",
  984. vgpu->id);
  985. return 0;
  986. }
  987. return 0;
  988. }
static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	/*
	 * Handle guest writes to the gen9 TRTT table-entry register.
	 * Rejects enabling TRTT (bit 0) while bit 1 is clear — per the WARN
	 * text that combination means a physical-address TRTT table, which
	 * cannot be virtualized. The latched value is mirrored straight to
	 * hardware because TRTTE is not saved/restored per context.
	 *
	 * Returns 0 on success, -EINVAL for the rejected configuration.
	 */
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	u32 trtte = *(u32 *)p_data;

	if ((trtte & 1) && (trtte & (1 << 1)) == 0) {
		WARN(1, "VM(%d): Use physical address for TRTT!\n",
				vgpu->id);
		return -EINVAL;
	}
	write_vreg(vgpu, offset, p_data, bytes);
	/* TRTTE is not per-context */
	I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));

	return 0;
}
static int gen9_trtt_chicken_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	/*
	 * Handle guest writes to the gen9 TRTT chicken register: if the
	 * guest sets bit 0, forward the value to hardware first (to unblock
	 * the hardware logic), then latch it into the vreg. Always
	 * returns 0.
	 */
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	u32 val = *(u32 *)p_data;

	if (val & 1) {
		/* unblock hw logic */
		I915_WRITE(_MMIO(offset), val);
	}
	write_vreg(vgpu, offset, p_data, bytes);
	return 0;
}
  1016. static int dpll_status_read(struct intel_vgpu *vgpu, unsigned int offset,
  1017. void *p_data, unsigned int bytes)
  1018. {
  1019. u32 v = 0;
  1020. if (vgpu_vreg(vgpu, 0x46010) & (1 << 31))
  1021. v |= (1 << 0);
  1022. if (vgpu_vreg(vgpu, 0x46014) & (1 << 31))
  1023. v |= (1 << 8);
  1024. if (vgpu_vreg(vgpu, 0x46040) & (1 << 31))
  1025. v |= (1 << 16);
  1026. if (vgpu_vreg(vgpu, 0x46060) & (1 << 31))
  1027. v |= (1 << 24);
  1028. vgpu_vreg(vgpu, offset) = v;
  1029. return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
  1030. }
static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	/*
	 * Emulate the GEN6 PCODE mailbox: only the commands the guest
	 * driver actually issues are handled; any other command leaves
	 * data0 untouched. Bit 31 of the written value is cleared before
	 * it is latched, so the guest's busy-poll completes immediately
	 * (presumably the PCODE ready/busy handshake bit — confirm).
	 */
	u32 value = *(u32 *)p_data;
	u32 cmd = value & 0xff;
	/* data0 aliases the virtual GEN6_PCODE_DATA register */
	u32 *data0 = &vgpu_vreg(vgpu, GEN6_PCODE_DATA);

	switch (cmd) {
	case 0x6:
		/**
		 * "Read memory latency" command on gen9.
		 * Below memory latency values are read
		 * from skylake platform.
		 */
		if (!*data0)
			*data0 = 0x1e1a1100;
		else
			*data0 = 0x61514b3d;
		break;
	case 0x5:
		/* NOTE(review): sets bit 0 of data0 as the reply —
		 * presumably an ack the guest expects; confirm semantics. */
		*data0 |= 0x1;
		break;
	}

	gvt_dbg_core("VM(%d) write %x to mailbox, return data0 %x\n",
			vgpu->id, value, *data0);

	value &= ~(1 << 31);
	return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
}
  1058. static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
  1059. unsigned int offset, void *p_data, unsigned int bytes)
  1060. {
  1061. u32 v = *(u32 *)p_data;
  1062. v &= (1 << 31) | (1 << 29) | (1 << 9) |
  1063. (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
  1064. v |= (v >> 1);
  1065. return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
  1066. }
static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	/*
	 * Force known-good workaround values into two SKL registers instead
	 * of whatever the guest wrote, and mirror them to hardware.
	 * Returns -EINVAL if registered for any other offset.
	 */
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	i915_reg_t reg = {.reg = offset};

	switch (offset) {
	case 0x4ddc:
		vgpu_vreg(vgpu, offset) = 0x8000003c;
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl */
		I915_WRITE(reg, vgpu_vreg(vgpu, offset));
		break;
	case 0x42080:
		vgpu_vreg(vgpu, offset) = 0x8000;
		/* WaCompressedResourceDisplayNewHashMode:skl */
		I915_WRITE(reg, vgpu_vreg(vgpu, offset));
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
  1088. static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
  1089. void *p_data, unsigned int bytes)
  1090. {
  1091. u32 v = *(u32 *)p_data;
  1092. /* other bits are MBZ. */
  1093. v &= (1 << 31) | (1 << 30);
  1094. v & (1 << 31) ? (v |= (1 << 30)) : (v &= ~(1 << 30));
  1095. vgpu_vreg(vgpu, offset) = v;
  1096. return 0;
  1097. }
static int ring_timestamp_mmio_read(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	/*
	 * Pass-through read of a ring TIMESTAMP register: refresh the vreg
	 * from real hardware so the guest observes a live counter, then
	 * serve the read from the vreg.
	 */
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;

	vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
}
static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	/*
	 * Collect guest writes to the ELSP (execlist submit port). The
	 * guest writes four dwords per submission; the fourth dword
	 * (index 3) triggers submission of the accumulated descriptors to
	 * the vGPU execlist logic. The dword index wraps modulo 4.
	 *
	 * Returns the submission result (0 if this write did not complete
	 * a group), or -EINVAL for an offset outside any engine.
	 */
	int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
	struct intel_vgpu_execlist *execlist;
	u32 data = *(u32 *)p_data;
	int ret = 0;

	if (WARN_ON(ring_id < 0 || ring_id > I915_NUM_ENGINES - 1))
		return -EINVAL;

	execlist = &vgpu->execlist[ring_id];

	execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
	if (execlist->elsp_dwords.index == 3) {
		/* fourth dword completes the submission group */
		ret = intel_vgpu_submit_execlist(vgpu, ring_id);
		if(ret)
			gvt_err("fail submit workload on ring %d\n", ring_id);
	}

	/* advance the write index, wrapping at 4 dwords */
	++execlist->elsp_dwords.index;
	execlist->elsp_dwords.index &= 0x3;
	return ret;
}
static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	/*
	 * Watch guest writes to a per-ring MODE register for the masked
	 * GFX_RUN_LIST_ENABLE bit; when the guest enables execlist
	 * submission, start the vGPU scheduler. Always returns 0.
	 */
	u32 data = *(u32 *)p_data;
	int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
	bool enable_execlist;

	write_vreg(vgpu, offset, p_data, bytes);

	/* masked register: react only when the mask bit marks the field
	 * as actually being written (enable or disable) */
	if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))
			|| (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
		enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);

		gvt_dbg_core("EXECLIST %s on ring %d\n",
				(enable_execlist ? "enabling" : "disabling"),
				ring_id);

		if (enable_execlist)
			intel_vgpu_start_schedule(vgpu);
		/* NOTE(review): disabling does not stop the scheduler */
	}
	return 0;
}
  1143. static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
  1144. unsigned int offset, void *p_data, unsigned int bytes)
  1145. {
  1146. int rc = 0;
  1147. unsigned int id = 0;
  1148. write_vreg(vgpu, offset, p_data, bytes);
  1149. vgpu_vreg(vgpu, offset) = 0;
  1150. switch (offset) {
  1151. case 0x4260:
  1152. id = RCS;
  1153. break;
  1154. case 0x4264:
  1155. id = VCS;
  1156. break;
  1157. case 0x4268:
  1158. id = VCS2;
  1159. break;
  1160. case 0x426c:
  1161. id = BCS;
  1162. break;
  1163. case 0x4270:
  1164. id = VECS;
  1165. break;
  1166. default:
  1167. rc = -EINVAL;
  1168. break;
  1169. }
  1170. set_bit(id, (void *)vgpu->tlb_handle_pending);
  1171. return rc;
  1172. }
  1173. static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
  1174. unsigned int offset, void *p_data, unsigned int bytes)
  1175. {
  1176. u32 data;
  1177. write_vreg(vgpu, offset, p_data, bytes);
  1178. data = vgpu_vreg(vgpu, offset);
  1179. if (data & _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET))
  1180. data |= RESET_CTL_READY_TO_RESET;
  1181. else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
  1182. data &= ~RESET_CTL_READY_TO_RESET;
  1183. vgpu_vreg(vgpu, offset) = data;
  1184. return 0;
  1185. }
/* Register one MMIO info entry with the GVT core; on failure, returns
 * the error from the enclosing init function (expects `gvt` and `ret`
 * in scope). */
#define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
	ret = new_mmio_info(gvt, INTEL_GVT_MMIO_OFFSET(reg), \
		f, s, am, rm, d, r, w); \
	if (ret) \
		return ret; \
} while (0)

/* 4-byte register, no flags, default read/write handlers */
#define MMIO_D(reg, d) \
	MMIO_F(reg, 4, 0, 0, 0, d, NULL, NULL)

/* 4-byte register with explicit read/write handlers */
#define MMIO_DH(reg, d, r, w) \
	MMIO_F(reg, 4, 0, 0, 0, d, r, w)

/* 4-byte register with flags and handlers */
#define MMIO_DFH(reg, d, f, r, w) \
	MMIO_F(reg, 4, f, 0, 0, d, r, w)

/* 4-byte F_GMADR register with address mask 0xFFFFF000 */
#define MMIO_GM(reg, d, r, w) \
	MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w)

/* 4-byte read-only register; @rm is the read-only mask */
#define MMIO_RO(reg, d, f, rm, r, w) \
	MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w)

/* Apply an MMIO_F registration to the same register on every ring base */
#define MMIO_RING_F(prefix, s, f, am, rm, d, r, w) do { \
	MMIO_F(prefix(RENDER_RING_BASE), s, f, am, rm, d, r, w); \
	MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
	MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
	MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
} while (0)

/* per-ring variants of the single-register helpers above */
#define MMIO_RING_D(prefix, d) \
	MMIO_RING_F(prefix, 4, 0, 0, 0, d, NULL, NULL)

#define MMIO_RING_DFH(prefix, d, f, r, w) \
	MMIO_RING_F(prefix, 4, f, 0, 0, d, r, w)

/* note: ring GM entries use address mask 0xFFFF0000, unlike MMIO_GM */
#define MMIO_RING_GM(prefix, d, r, w) \
	MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w)

#define MMIO_RING_RO(prefix, d, f, rm, r, w) \
	MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w)
  1216. static int init_generic_mmio_info(struct intel_gvt *gvt)
  1217. {
  1218. struct drm_i915_private *dev_priv = gvt->dev_priv;
  1219. int ret;
  1220. MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
  1221. MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
  1222. MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler);
  1223. MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);
  1224. MMIO_D(SDEISR, D_ALL);
  1225. MMIO_RING_D(RING_HWSTAM, D_ALL);
  1226. MMIO_GM(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
  1227. MMIO_GM(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
  1228. MMIO_GM(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
  1229. MMIO_GM(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
  1230. #define RING_REG(base) (base + 0x28)
  1231. MMIO_RING_D(RING_REG, D_ALL);
  1232. #undef RING_REG
  1233. #define RING_REG(base) (base + 0x134)
  1234. MMIO_RING_D(RING_REG, D_ALL);
  1235. #undef RING_REG
  1236. MMIO_GM(0x2148, D_ALL, NULL, NULL);
  1237. MMIO_GM(CCID, D_ALL, NULL, NULL);
  1238. MMIO_GM(0x12198, D_ALL, NULL, NULL);
  1239. MMIO_D(GEN7_CXT_SIZE, D_ALL);
  1240. MMIO_RING_D(RING_TAIL, D_ALL);
  1241. MMIO_RING_D(RING_HEAD, D_ALL);
  1242. MMIO_RING_D(RING_CTL, D_ALL);
  1243. MMIO_RING_D(RING_ACTHD, D_ALL);
  1244. MMIO_RING_GM(RING_START, D_ALL, NULL, NULL);
  1245. /* RING MODE */
  1246. #define RING_REG(base) (base + 0x29c)
  1247. MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK, NULL, ring_mode_mmio_write);
  1248. #undef RING_REG
  1249. MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
  1250. MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK, NULL, NULL);
  1251. MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS,
  1252. ring_timestamp_mmio_read, NULL);
  1253. MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS,
  1254. ring_timestamp_mmio_read, NULL);
  1255. MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
  1256. MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK, NULL, NULL);
  1257. MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
  1258. MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK, NULL, NULL);
  1259. MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK, NULL, NULL);
  1260. MMIO_DFH(0x2088, D_ALL, F_MODE_MASK, NULL, NULL);
  1261. MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK, NULL, NULL);
  1262. MMIO_DFH(0x2470, D_ALL, F_MODE_MASK, NULL, NULL);
  1263. MMIO_D(GAM_ECOCHK, D_ALL);
  1264. MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK, NULL, NULL);
  1265. MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
  1266. MMIO_D(0x9030, D_ALL);
  1267. MMIO_D(0x20a0, D_ALL);
  1268. MMIO_D(0x2420, D_ALL);
  1269. MMIO_D(0x2430, D_ALL);
  1270. MMIO_D(0x2434, D_ALL);
  1271. MMIO_D(0x2438, D_ALL);
  1272. MMIO_D(0x243c, D_ALL);
  1273. MMIO_DFH(0x7018, D_ALL, F_MODE_MASK, NULL, NULL);
  1274. MMIO_DFH(HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
  1275. MMIO_DFH(0xe100, D_ALL, F_MODE_MASK, NULL, NULL);
  1276. /* display */
  1277. MMIO_F(0x60220, 0x20, 0, 0, 0, D_ALL, NULL, NULL);
  1278. MMIO_D(0x602a0, D_ALL);
  1279. MMIO_D(0x65050, D_ALL);
  1280. MMIO_D(0x650b4, D_ALL);
  1281. MMIO_D(0xc4040, D_ALL);
  1282. MMIO_D(DERRMR, D_ALL);
  1283. MMIO_D(PIPEDSL(PIPE_A), D_ALL);
  1284. MMIO_D(PIPEDSL(PIPE_B), D_ALL);
  1285. MMIO_D(PIPEDSL(PIPE_C), D_ALL);
  1286. MMIO_D(PIPEDSL(_PIPE_EDP), D_ALL);
  1287. MMIO_DH(PIPECONF(PIPE_A), D_ALL, NULL, pipeconf_mmio_write);
  1288. MMIO_DH(PIPECONF(PIPE_B), D_ALL, NULL, pipeconf_mmio_write);
  1289. MMIO_DH(PIPECONF(PIPE_C), D_ALL, NULL, pipeconf_mmio_write);
  1290. MMIO_DH(PIPECONF(_PIPE_EDP), D_ALL, NULL, pipeconf_mmio_write);
  1291. MMIO_D(PIPESTAT(PIPE_A), D_ALL);
  1292. MMIO_D(PIPESTAT(PIPE_B), D_ALL);
  1293. MMIO_D(PIPESTAT(PIPE_C), D_ALL);
  1294. MMIO_D(PIPESTAT(_PIPE_EDP), D_ALL);
  1295. MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_A), D_ALL);
  1296. MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_B), D_ALL);
  1297. MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_C), D_ALL);
  1298. MMIO_D(PIPE_FLIPCOUNT_G4X(_PIPE_EDP), D_ALL);
  1299. MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_A), D_ALL);
  1300. MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_B), D_ALL);
  1301. MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_C), D_ALL);
  1302. MMIO_D(PIPE_FRMCOUNT_G4X(_PIPE_EDP), D_ALL);
  1303. MMIO_D(CURCNTR(PIPE_A), D_ALL);
  1304. MMIO_D(CURCNTR(PIPE_B), D_ALL);
  1305. MMIO_D(CURCNTR(PIPE_C), D_ALL);
  1306. MMIO_D(CURPOS(PIPE_A), D_ALL);
  1307. MMIO_D(CURPOS(PIPE_B), D_ALL);
  1308. MMIO_D(CURPOS(PIPE_C), D_ALL);
  1309. MMIO_D(CURBASE(PIPE_A), D_ALL);
  1310. MMIO_D(CURBASE(PIPE_B), D_ALL);
  1311. MMIO_D(CURBASE(PIPE_C), D_ALL);
  1312. MMIO_D(0x700ac, D_ALL);
  1313. MMIO_D(0x710ac, D_ALL);
  1314. MMIO_D(0x720ac, D_ALL);
  1315. MMIO_D(0x70090, D_ALL);
  1316. MMIO_D(0x70094, D_ALL);
  1317. MMIO_D(0x70098, D_ALL);
  1318. MMIO_D(0x7009c, D_ALL);
  1319. MMIO_D(DSPCNTR(PIPE_A), D_ALL);
  1320. MMIO_D(DSPADDR(PIPE_A), D_ALL);
  1321. MMIO_D(DSPSTRIDE(PIPE_A), D_ALL);
  1322. MMIO_D(DSPPOS(PIPE_A), D_ALL);
  1323. MMIO_D(DSPSIZE(PIPE_A), D_ALL);
  1324. MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
  1325. MMIO_D(DSPOFFSET(PIPE_A), D_ALL);
  1326. MMIO_D(DSPSURFLIVE(PIPE_A), D_ALL);
  1327. MMIO_D(DSPCNTR(PIPE_B), D_ALL);
  1328. MMIO_D(DSPADDR(PIPE_B), D_ALL);
  1329. MMIO_D(DSPSTRIDE(PIPE_B), D_ALL);
  1330. MMIO_D(DSPPOS(PIPE_B), D_ALL);
  1331. MMIO_D(DSPSIZE(PIPE_B), D_ALL);
  1332. MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
  1333. MMIO_D(DSPOFFSET(PIPE_B), D_ALL);
  1334. MMIO_D(DSPSURFLIVE(PIPE_B), D_ALL);
  1335. MMIO_D(DSPCNTR(PIPE_C), D_ALL);
  1336. MMIO_D(DSPADDR(PIPE_C), D_ALL);
  1337. MMIO_D(DSPSTRIDE(PIPE_C), D_ALL);
  1338. MMIO_D(DSPPOS(PIPE_C), D_ALL);
  1339. MMIO_D(DSPSIZE(PIPE_C), D_ALL);
  1340. MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
  1341. MMIO_D(DSPOFFSET(PIPE_C), D_ALL);
  1342. MMIO_D(DSPSURFLIVE(PIPE_C), D_ALL);
  1343. MMIO_D(SPRCTL(PIPE_A), D_ALL);
  1344. MMIO_D(SPRLINOFF(PIPE_A), D_ALL);
  1345. MMIO_D(SPRSTRIDE(PIPE_A), D_ALL);
  1346. MMIO_D(SPRPOS(PIPE_A), D_ALL);
  1347. MMIO_D(SPRSIZE(PIPE_A), D_ALL);
  1348. MMIO_D(SPRKEYVAL(PIPE_A), D_ALL);
  1349. MMIO_D(SPRKEYMSK(PIPE_A), D_ALL);
  1350. MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write);
  1351. MMIO_D(SPRKEYMAX(PIPE_A), D_ALL);
  1352. MMIO_D(SPROFFSET(PIPE_A), D_ALL);
  1353. MMIO_D(SPRSCALE(PIPE_A), D_ALL);
  1354. MMIO_D(SPRSURFLIVE(PIPE_A), D_ALL);
  1355. MMIO_D(SPRCTL(PIPE_B), D_ALL);
  1356. MMIO_D(SPRLINOFF(PIPE_B), D_ALL);
  1357. MMIO_D(SPRSTRIDE(PIPE_B), D_ALL);
  1358. MMIO_D(SPRPOS(PIPE_B), D_ALL);
  1359. MMIO_D(SPRSIZE(PIPE_B), D_ALL);
  1360. MMIO_D(SPRKEYVAL(PIPE_B), D_ALL);
  1361. MMIO_D(SPRKEYMSK(PIPE_B), D_ALL);
  1362. MMIO_DH(SPRSURF(PIPE_B), D_ALL, NULL, spr_surf_mmio_write);
  1363. MMIO_D(SPRKEYMAX(PIPE_B), D_ALL);
  1364. MMIO_D(SPROFFSET(PIPE_B), D_ALL);
  1365. MMIO_D(SPRSCALE(PIPE_B), D_ALL);
  1366. MMIO_D(SPRSURFLIVE(PIPE_B), D_ALL);
  1367. MMIO_D(SPRCTL(PIPE_C), D_ALL);
  1368. MMIO_D(SPRLINOFF(PIPE_C), D_ALL);
  1369. MMIO_D(SPRSTRIDE(PIPE_C), D_ALL);
  1370. MMIO_D(SPRPOS(PIPE_C), D_ALL);
  1371. MMIO_D(SPRSIZE(PIPE_C), D_ALL);
  1372. MMIO_D(SPRKEYVAL(PIPE_C), D_ALL);
  1373. MMIO_D(SPRKEYMSK(PIPE_C), D_ALL);
  1374. MMIO_DH(SPRSURF(PIPE_C), D_ALL, NULL, spr_surf_mmio_write);
  1375. MMIO_D(SPRKEYMAX(PIPE_C), D_ALL);
  1376. MMIO_D(SPROFFSET(PIPE_C), D_ALL);
  1377. MMIO_D(SPRSCALE(PIPE_C), D_ALL);
  1378. MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL);
  1379. MMIO_F(LGC_PALETTE(PIPE_A, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
  1380. MMIO_F(LGC_PALETTE(PIPE_B, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
  1381. MMIO_F(LGC_PALETTE(PIPE_C, 0), 4 * 256, 0, 0, 0, D_ALL, NULL, NULL);
  1382. MMIO_D(HTOTAL(TRANSCODER_A), D_ALL);
  1383. MMIO_D(HBLANK(TRANSCODER_A), D_ALL);
  1384. MMIO_D(HSYNC(TRANSCODER_A), D_ALL);
  1385. MMIO_D(VTOTAL(TRANSCODER_A), D_ALL);
  1386. MMIO_D(VBLANK(TRANSCODER_A), D_ALL);
  1387. MMIO_D(VSYNC(TRANSCODER_A), D_ALL);
  1388. MMIO_D(BCLRPAT(TRANSCODER_A), D_ALL);
  1389. MMIO_D(VSYNCSHIFT(TRANSCODER_A), D_ALL);
  1390. MMIO_D(PIPESRC(TRANSCODER_A), D_ALL);
  1391. MMIO_D(HTOTAL(TRANSCODER_B), D_ALL);
  1392. MMIO_D(HBLANK(TRANSCODER_B), D_ALL);
  1393. MMIO_D(HSYNC(TRANSCODER_B), D_ALL);
  1394. MMIO_D(VTOTAL(TRANSCODER_B), D_ALL);
  1395. MMIO_D(VBLANK(TRANSCODER_B), D_ALL);
  1396. MMIO_D(VSYNC(TRANSCODER_B), D_ALL);
  1397. MMIO_D(BCLRPAT(TRANSCODER_B), D_ALL);
  1398. MMIO_D(VSYNCSHIFT(TRANSCODER_B), D_ALL);
  1399. MMIO_D(PIPESRC(TRANSCODER_B), D_ALL);
  1400. MMIO_D(HTOTAL(TRANSCODER_C), D_ALL);
  1401. MMIO_D(HBLANK(TRANSCODER_C), D_ALL);
  1402. MMIO_D(HSYNC(TRANSCODER_C), D_ALL);
  1403. MMIO_D(VTOTAL(TRANSCODER_C), D_ALL);
  1404. MMIO_D(VBLANK(TRANSCODER_C), D_ALL);
  1405. MMIO_D(VSYNC(TRANSCODER_C), D_ALL);
  1406. MMIO_D(BCLRPAT(TRANSCODER_C), D_ALL);
  1407. MMIO_D(VSYNCSHIFT(TRANSCODER_C), D_ALL);
  1408. MMIO_D(PIPESRC(TRANSCODER_C), D_ALL);
  1409. MMIO_D(HTOTAL(TRANSCODER_EDP), D_ALL);
  1410. MMIO_D(HBLANK(TRANSCODER_EDP), D_ALL);
  1411. MMIO_D(HSYNC(TRANSCODER_EDP), D_ALL);
  1412. MMIO_D(VTOTAL(TRANSCODER_EDP), D_ALL);
  1413. MMIO_D(VBLANK(TRANSCODER_EDP), D_ALL);
  1414. MMIO_D(VSYNC(TRANSCODER_EDP), D_ALL);
  1415. MMIO_D(BCLRPAT(TRANSCODER_EDP), D_ALL);
  1416. MMIO_D(VSYNCSHIFT(TRANSCODER_EDP), D_ALL);
  1417. MMIO_D(PIPE_DATA_M1(TRANSCODER_A), D_ALL);
  1418. MMIO_D(PIPE_DATA_N1(TRANSCODER_A), D_ALL);
  1419. MMIO_D(PIPE_DATA_M2(TRANSCODER_A), D_ALL);
  1420. MMIO_D(PIPE_DATA_N2(TRANSCODER_A), D_ALL);
  1421. MMIO_D(PIPE_LINK_M1(TRANSCODER_A), D_ALL);
  1422. MMIO_D(PIPE_LINK_N1(TRANSCODER_A), D_ALL);
  1423. MMIO_D(PIPE_LINK_M2(TRANSCODER_A), D_ALL);
  1424. MMIO_D(PIPE_LINK_N2(TRANSCODER_A), D_ALL);
  1425. MMIO_D(PIPE_DATA_M1(TRANSCODER_B), D_ALL);
  1426. MMIO_D(PIPE_DATA_N1(TRANSCODER_B), D_ALL);
  1427. MMIO_D(PIPE_DATA_M2(TRANSCODER_B), D_ALL);
  1428. MMIO_D(PIPE_DATA_N2(TRANSCODER_B), D_ALL);
  1429. MMIO_D(PIPE_LINK_M1(TRANSCODER_B), D_ALL);
  1430. MMIO_D(PIPE_LINK_N1(TRANSCODER_B), D_ALL);
  1431. MMIO_D(PIPE_LINK_M2(TRANSCODER_B), D_ALL);
  1432. MMIO_D(PIPE_LINK_N2(TRANSCODER_B), D_ALL);
  1433. MMIO_D(PIPE_DATA_M1(TRANSCODER_C), D_ALL);
  1434. MMIO_D(PIPE_DATA_N1(TRANSCODER_C), D_ALL);
  1435. MMIO_D(PIPE_DATA_M2(TRANSCODER_C), D_ALL);
  1436. MMIO_D(PIPE_DATA_N2(TRANSCODER_C), D_ALL);
  1437. MMIO_D(PIPE_LINK_M1(TRANSCODER_C), D_ALL);
  1438. MMIO_D(PIPE_LINK_N1(TRANSCODER_C), D_ALL);
  1439. MMIO_D(PIPE_LINK_M2(TRANSCODER_C), D_ALL);
  1440. MMIO_D(PIPE_LINK_N2(TRANSCODER_C), D_ALL);
  1441. MMIO_D(PIPE_DATA_M1(TRANSCODER_EDP), D_ALL);
  1442. MMIO_D(PIPE_DATA_N1(TRANSCODER_EDP), D_ALL);
  1443. MMIO_D(PIPE_DATA_M2(TRANSCODER_EDP), D_ALL);
  1444. MMIO_D(PIPE_DATA_N2(TRANSCODER_EDP), D_ALL);
  1445. MMIO_D(PIPE_LINK_M1(TRANSCODER_EDP), D_ALL);
  1446. MMIO_D(PIPE_LINK_N1(TRANSCODER_EDP), D_ALL);
  1447. MMIO_D(PIPE_LINK_M2(TRANSCODER_EDP), D_ALL);
  1448. MMIO_D(PIPE_LINK_N2(TRANSCODER_EDP), D_ALL);
  1449. MMIO_D(PF_CTL(PIPE_A), D_ALL);
  1450. MMIO_D(PF_WIN_SZ(PIPE_A), D_ALL);
  1451. MMIO_D(PF_WIN_POS(PIPE_A), D_ALL);
  1452. MMIO_D(PF_VSCALE(PIPE_A), D_ALL);
  1453. MMIO_D(PF_HSCALE(PIPE_A), D_ALL);
  1454. MMIO_D(PF_CTL(PIPE_B), D_ALL);
  1455. MMIO_D(PF_WIN_SZ(PIPE_B), D_ALL);
  1456. MMIO_D(PF_WIN_POS(PIPE_B), D_ALL);
  1457. MMIO_D(PF_VSCALE(PIPE_B), D_ALL);
  1458. MMIO_D(PF_HSCALE(PIPE_B), D_ALL);
  1459. MMIO_D(PF_CTL(PIPE_C), D_ALL);
  1460. MMIO_D(PF_WIN_SZ(PIPE_C), D_ALL);
  1461. MMIO_D(PF_WIN_POS(PIPE_C), D_ALL);
  1462. MMIO_D(PF_VSCALE(PIPE_C), D_ALL);
  1463. MMIO_D(PF_HSCALE(PIPE_C), D_ALL);
  1464. MMIO_D(WM0_PIPEA_ILK, D_ALL);
  1465. MMIO_D(WM0_PIPEB_ILK, D_ALL);
  1466. MMIO_D(WM0_PIPEC_IVB, D_ALL);
  1467. MMIO_D(WM1_LP_ILK, D_ALL);
  1468. MMIO_D(WM2_LP_ILK, D_ALL);
  1469. MMIO_D(WM3_LP_ILK, D_ALL);
  1470. MMIO_D(WM1S_LP_ILK, D_ALL);
  1471. MMIO_D(WM2S_LP_IVB, D_ALL);
  1472. MMIO_D(WM3S_LP_IVB, D_ALL);
  1473. MMIO_D(BLC_PWM_CPU_CTL2, D_ALL);
  1474. MMIO_D(BLC_PWM_CPU_CTL, D_ALL);
  1475. MMIO_D(BLC_PWM_PCH_CTL1, D_ALL);
  1476. MMIO_D(BLC_PWM_PCH_CTL2, D_ALL);
  1477. MMIO_D(0x48268, D_ALL);
  1478. MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read,
  1479. gmbus_mmio_write);
  1480. MMIO_F(PCH_GPIOA, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL);
  1481. MMIO_F(0xe4f00, 0x28, 0, 0, 0, D_ALL, NULL, NULL);
  1482. MMIO_F(_PCH_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
  1483. dp_aux_ch_ctl_mmio_write);
  1484. MMIO_F(_PCH_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
  1485. dp_aux_ch_ctl_mmio_write);
  1486. MMIO_F(_PCH_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
  1487. dp_aux_ch_ctl_mmio_write);
  1488. MMIO_RO(PCH_ADPA, D_ALL, 0, ADPA_CRT_HOTPLUG_MONITOR_MASK, NULL, pch_adpa_mmio_write);
  1489. MMIO_DH(_PCH_TRANSACONF, D_ALL, NULL, transconf_mmio_write);
  1490. MMIO_DH(_PCH_TRANSBCONF, D_ALL, NULL, transconf_mmio_write);
  1491. MMIO_DH(FDI_RX_IIR(PIPE_A), D_ALL, NULL, fdi_rx_iir_mmio_write);
  1492. MMIO_DH(FDI_RX_IIR(PIPE_B), D_ALL, NULL, fdi_rx_iir_mmio_write);
  1493. MMIO_DH(FDI_RX_IIR(PIPE_C), D_ALL, NULL, fdi_rx_iir_mmio_write);
  1494. MMIO_DH(FDI_RX_IMR(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
  1495. MMIO_DH(FDI_RX_IMR(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
  1496. MMIO_DH(FDI_RX_IMR(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
  1497. MMIO_DH(FDI_RX_CTL(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
  1498. MMIO_DH(FDI_RX_CTL(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
  1499. MMIO_DH(FDI_RX_CTL(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
  1500. MMIO_D(_PCH_TRANS_HTOTAL_A, D_ALL);
  1501. MMIO_D(_PCH_TRANS_HBLANK_A, D_ALL);
  1502. MMIO_D(_PCH_TRANS_HSYNC_A, D_ALL);
  1503. MMIO_D(_PCH_TRANS_VTOTAL_A, D_ALL);
  1504. MMIO_D(_PCH_TRANS_VBLANK_A, D_ALL);
  1505. MMIO_D(_PCH_TRANS_VSYNC_A, D_ALL);
  1506. MMIO_D(_PCH_TRANS_VSYNCSHIFT_A, D_ALL);
  1507. MMIO_D(_PCH_TRANS_HTOTAL_B, D_ALL);
  1508. MMIO_D(_PCH_TRANS_HBLANK_B, D_ALL);
  1509. MMIO_D(_PCH_TRANS_HSYNC_B, D_ALL);
  1510. MMIO_D(_PCH_TRANS_VTOTAL_B, D_ALL);
  1511. MMIO_D(_PCH_TRANS_VBLANK_B, D_ALL);
  1512. MMIO_D(_PCH_TRANS_VSYNC_B, D_ALL);
  1513. MMIO_D(_PCH_TRANS_VSYNCSHIFT_B, D_ALL);
  1514. MMIO_D(_PCH_TRANSA_DATA_M1, D_ALL);
  1515. MMIO_D(_PCH_TRANSA_DATA_N1, D_ALL);
  1516. MMIO_D(_PCH_TRANSA_DATA_M2, D_ALL);
  1517. MMIO_D(_PCH_TRANSA_DATA_N2, D_ALL);
  1518. MMIO_D(_PCH_TRANSA_LINK_M1, D_ALL);
  1519. MMIO_D(_PCH_TRANSA_LINK_N1, D_ALL);
  1520. MMIO_D(_PCH_TRANSA_LINK_M2, D_ALL);
  1521. MMIO_D(_PCH_TRANSA_LINK_N2, D_ALL);
  1522. MMIO_D(TRANS_DP_CTL(PIPE_A), D_ALL);
  1523. MMIO_D(TRANS_DP_CTL(PIPE_B), D_ALL);
  1524. MMIO_D(TRANS_DP_CTL(PIPE_C), D_ALL);
  1525. MMIO_D(TVIDEO_DIP_CTL(PIPE_A), D_ALL);
  1526. MMIO_D(TVIDEO_DIP_DATA(PIPE_A), D_ALL);
  1527. MMIO_D(TVIDEO_DIP_GCP(PIPE_A), D_ALL);
  1528. MMIO_D(TVIDEO_DIP_CTL(PIPE_B), D_ALL);
  1529. MMIO_D(TVIDEO_DIP_DATA(PIPE_B), D_ALL);
  1530. MMIO_D(TVIDEO_DIP_GCP(PIPE_B), D_ALL);
  1531. MMIO_D(TVIDEO_DIP_CTL(PIPE_C), D_ALL);
  1532. MMIO_D(TVIDEO_DIP_DATA(PIPE_C), D_ALL);
  1533. MMIO_D(TVIDEO_DIP_GCP(PIPE_C), D_ALL);
  1534. MMIO_D(_FDI_RXA_MISC, D_ALL);
  1535. MMIO_D(_FDI_RXB_MISC, D_ALL);
  1536. MMIO_D(_FDI_RXA_TUSIZE1, D_ALL);
  1537. MMIO_D(_FDI_RXA_TUSIZE2, D_ALL);
  1538. MMIO_D(_FDI_RXB_TUSIZE1, D_ALL);
  1539. MMIO_D(_FDI_RXB_TUSIZE2, D_ALL);
  1540. MMIO_DH(PCH_PP_CONTROL, D_ALL, NULL, pch_pp_control_mmio_write);
  1541. MMIO_D(PCH_PP_DIVISOR, D_ALL);
  1542. MMIO_D(PCH_PP_STATUS, D_ALL);
  1543. MMIO_D(PCH_LVDS, D_ALL);
  1544. MMIO_D(_PCH_DPLL_A, D_ALL);
  1545. MMIO_D(_PCH_DPLL_B, D_ALL);
  1546. MMIO_D(_PCH_FPA0, D_ALL);
  1547. MMIO_D(_PCH_FPA1, D_ALL);
  1548. MMIO_D(_PCH_FPB0, D_ALL);
  1549. MMIO_D(_PCH_FPB1, D_ALL);
  1550. MMIO_D(PCH_DREF_CONTROL, D_ALL);
  1551. MMIO_D(PCH_RAWCLK_FREQ, D_ALL);
  1552. MMIO_D(PCH_DPLL_SEL, D_ALL);
  1553. MMIO_D(0x61208, D_ALL);
  1554. MMIO_D(0x6120c, D_ALL);
  1555. MMIO_D(PCH_PP_ON_DELAYS, D_ALL);
  1556. MMIO_D(PCH_PP_OFF_DELAYS, D_ALL);
  1557. MMIO_DH(0xe651c, D_ALL, dpy_reg_mmio_read, NULL);
  1558. MMIO_DH(0xe661c, D_ALL, dpy_reg_mmio_read, NULL);
  1559. MMIO_DH(0xe671c, D_ALL, dpy_reg_mmio_read, NULL);
  1560. MMIO_DH(0xe681c, D_ALL, dpy_reg_mmio_read, NULL);
  1561. MMIO_DH(0xe6c04, D_ALL, dpy_reg_mmio_read_2, NULL);
  1562. MMIO_DH(0xe6e1c, D_ALL, dpy_reg_mmio_read_3, NULL);
  1563. MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0,
  1564. PORTA_HOTPLUG_STATUS_MASK
  1565. | PORTB_HOTPLUG_STATUS_MASK
  1566. | PORTC_HOTPLUG_STATUS_MASK
  1567. | PORTD_HOTPLUG_STATUS_MASK,
  1568. NULL, NULL);
  1569. MMIO_DH(LCPLL_CTL, D_ALL, NULL, lcpll_ctl_mmio_write);
  1570. MMIO_D(FUSE_STRAP, D_ALL);
  1571. MMIO_D(DIGITAL_PORT_HOTPLUG_CNTRL, D_ALL);
  1572. MMIO_D(DISP_ARB_CTL, D_ALL);
  1573. MMIO_D(DISP_ARB_CTL2, D_ALL);
  1574. MMIO_D(ILK_DISPLAY_CHICKEN1, D_ALL);
  1575. MMIO_D(ILK_DISPLAY_CHICKEN2, D_ALL);
  1576. MMIO_D(ILK_DSPCLK_GATE_D, D_ALL);
  1577. MMIO_D(SOUTH_CHICKEN1, D_ALL);
  1578. MMIO_DH(SOUTH_CHICKEN2, D_ALL, NULL, south_chicken2_mmio_write);
  1579. MMIO_D(_TRANSA_CHICKEN1, D_ALL);
  1580. MMIO_D(_TRANSB_CHICKEN1, D_ALL);
  1581. MMIO_D(SOUTH_DSPCLK_GATE_D, D_ALL);
  1582. MMIO_D(_TRANSA_CHICKEN2, D_ALL);
  1583. MMIO_D(_TRANSB_CHICKEN2, D_ALL);
  1584. MMIO_D(ILK_DPFC_CB_BASE, D_ALL);
  1585. MMIO_D(ILK_DPFC_CONTROL, D_ALL);
  1586. MMIO_D(ILK_DPFC_RECOMP_CTL, D_ALL);
  1587. MMIO_D(ILK_DPFC_STATUS, D_ALL);
  1588. MMIO_D(ILK_DPFC_FENCE_YOFF, D_ALL);
  1589. MMIO_D(ILK_DPFC_CHICKEN, D_ALL);
  1590. MMIO_D(ILK_FBC_RT_BASE, D_ALL);
  1591. MMIO_D(IPS_CTL, D_ALL);
  1592. MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_A), D_ALL);
  1593. MMIO_D(PIPE_CSC_COEFF_BY(PIPE_A), D_ALL);
  1594. MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_A), D_ALL);
  1595. MMIO_D(PIPE_CSC_COEFF_BU(PIPE_A), D_ALL);
  1596. MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_A), D_ALL);
  1597. MMIO_D(PIPE_CSC_COEFF_BV(PIPE_A), D_ALL);
  1598. MMIO_D(PIPE_CSC_MODE(PIPE_A), D_ALL);
  1599. MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_A), D_ALL);
  1600. MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_A), D_ALL);
  1601. MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_A), D_ALL);
  1602. MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_A), D_ALL);
  1603. MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_A), D_ALL);
  1604. MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_A), D_ALL);
  1605. MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_B), D_ALL);
  1606. MMIO_D(PIPE_CSC_COEFF_BY(PIPE_B), D_ALL);
  1607. MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_B), D_ALL);
  1608. MMIO_D(PIPE_CSC_COEFF_BU(PIPE_B), D_ALL);
  1609. MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_B), D_ALL);
  1610. MMIO_D(PIPE_CSC_COEFF_BV(PIPE_B), D_ALL);
  1611. MMIO_D(PIPE_CSC_MODE(PIPE_B), D_ALL);
  1612. MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_B), D_ALL);
  1613. MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_B), D_ALL);
  1614. MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_B), D_ALL);
  1615. MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_B), D_ALL);
  1616. MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_B), D_ALL);
  1617. MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_B), D_ALL);
  1618. MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_C), D_ALL);
  1619. MMIO_D(PIPE_CSC_COEFF_BY(PIPE_C), D_ALL);
  1620. MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_C), D_ALL);
  1621. MMIO_D(PIPE_CSC_COEFF_BU(PIPE_C), D_ALL);
  1622. MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_C), D_ALL);
  1623. MMIO_D(PIPE_CSC_COEFF_BV(PIPE_C), D_ALL);
  1624. MMIO_D(PIPE_CSC_MODE(PIPE_C), D_ALL);
  1625. MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_C), D_ALL);
  1626. MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_C), D_ALL);
  1627. MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_C), D_ALL);
  1628. MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_C), D_ALL);
  1629. MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_C), D_ALL);
  1630. MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_C), D_ALL);
  1631. MMIO_D(PREC_PAL_INDEX(PIPE_A), D_ALL);
  1632. MMIO_D(PREC_PAL_DATA(PIPE_A), D_ALL);
  1633. MMIO_F(PREC_PAL_GC_MAX(PIPE_A, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
  1634. MMIO_D(PREC_PAL_INDEX(PIPE_B), D_ALL);
  1635. MMIO_D(PREC_PAL_DATA(PIPE_B), D_ALL);
  1636. MMIO_F(PREC_PAL_GC_MAX(PIPE_B, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
  1637. MMIO_D(PREC_PAL_INDEX(PIPE_C), D_ALL);
  1638. MMIO_D(PREC_PAL_DATA(PIPE_C), D_ALL);
  1639. MMIO_F(PREC_PAL_GC_MAX(PIPE_C, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
  1640. MMIO_D(0x60110, D_ALL);
  1641. MMIO_D(0x61110, D_ALL);
  1642. MMIO_F(0x70400, 0x40, 0, 0, 0, D_ALL, NULL, NULL);
  1643. MMIO_F(0x71400, 0x40, 0, 0, 0, D_ALL, NULL, NULL);
  1644. MMIO_F(0x72400, 0x40, 0, 0, 0, D_ALL, NULL, NULL);
  1645. MMIO_F(0x70440, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
  1646. MMIO_F(0x71440, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
  1647. MMIO_F(0x72440, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
  1648. MMIO_F(0x7044c, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
  1649. MMIO_F(0x7144c, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
  1650. MMIO_F(0x7244c, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
  1651. MMIO_D(PIPE_WM_LINETIME(PIPE_A), D_ALL);
  1652. MMIO_D(PIPE_WM_LINETIME(PIPE_B), D_ALL);
  1653. MMIO_D(PIPE_WM_LINETIME(PIPE_C), D_ALL);
  1654. MMIO_D(SPLL_CTL, D_ALL);
  1655. MMIO_D(_WRPLL_CTL1, D_ALL);
  1656. MMIO_D(_WRPLL_CTL2, D_ALL);
  1657. MMIO_D(PORT_CLK_SEL(PORT_A), D_ALL);
  1658. MMIO_D(PORT_CLK_SEL(PORT_B), D_ALL);
  1659. MMIO_D(PORT_CLK_SEL(PORT_C), D_ALL);
  1660. MMIO_D(PORT_CLK_SEL(PORT_D), D_ALL);
  1661. MMIO_D(PORT_CLK_SEL(PORT_E), D_ALL);
  1662. MMIO_D(TRANS_CLK_SEL(TRANSCODER_A), D_ALL);
  1663. MMIO_D(TRANS_CLK_SEL(TRANSCODER_B), D_ALL);
  1664. MMIO_D(TRANS_CLK_SEL(TRANSCODER_C), D_ALL);
  1665. MMIO_D(HSW_NDE_RSTWRN_OPT, D_ALL);
  1666. MMIO_D(0x46508, D_ALL);
  1667. MMIO_D(0x49080, D_ALL);
  1668. MMIO_D(0x49180, D_ALL);
  1669. MMIO_D(0x49280, D_ALL);
  1670. MMIO_F(0x49090, 0x14, 0, 0, 0, D_ALL, NULL, NULL);
  1671. MMIO_F(0x49190, 0x14, 0, 0, 0, D_ALL, NULL, NULL);
  1672. MMIO_F(0x49290, 0x14, 0, 0, 0, D_ALL, NULL, NULL);
  1673. MMIO_D(GAMMA_MODE(PIPE_A), D_ALL);
  1674. MMIO_D(GAMMA_MODE(PIPE_B), D_ALL);
  1675. MMIO_D(GAMMA_MODE(PIPE_C), D_ALL);
  1676. MMIO_D(PIPE_MULT(PIPE_A), D_ALL);
  1677. MMIO_D(PIPE_MULT(PIPE_B), D_ALL);
  1678. MMIO_D(PIPE_MULT(PIPE_C), D_ALL);
  1679. MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_A), D_ALL);
  1680. MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_B), D_ALL);
  1681. MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_C), D_ALL);
  1682. MMIO_DH(SFUSE_STRAP, D_ALL, NULL, NULL);
  1683. MMIO_D(SBI_ADDR, D_ALL);
  1684. MMIO_DH(SBI_DATA, D_ALL, sbi_data_mmio_read, NULL);
  1685. MMIO_DH(SBI_CTL_STAT, D_ALL, NULL, sbi_ctl_mmio_write);
  1686. MMIO_D(PIXCLK_GATE, D_ALL);
  1687. MMIO_F(_DPA_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_ALL, NULL,
  1688. dp_aux_ch_ctl_mmio_write);
  1689. MMIO_DH(DDI_BUF_CTL(PORT_A), D_ALL, NULL, ddi_buf_ctl_mmio_write);
  1690. MMIO_DH(DDI_BUF_CTL(PORT_B), D_ALL, NULL, ddi_buf_ctl_mmio_write);
  1691. MMIO_DH(DDI_BUF_CTL(PORT_C), D_ALL, NULL, ddi_buf_ctl_mmio_write);
  1692. MMIO_DH(DDI_BUF_CTL(PORT_D), D_ALL, NULL, ddi_buf_ctl_mmio_write);
  1693. MMIO_DH(DDI_BUF_CTL(PORT_E), D_ALL, NULL, ddi_buf_ctl_mmio_write);
  1694. MMIO_DH(DP_TP_CTL(PORT_A), D_ALL, NULL, dp_tp_ctl_mmio_write);
  1695. MMIO_DH(DP_TP_CTL(PORT_B), D_ALL, NULL, dp_tp_ctl_mmio_write);
  1696. MMIO_DH(DP_TP_CTL(PORT_C), D_ALL, NULL, dp_tp_ctl_mmio_write);
  1697. MMIO_DH(DP_TP_CTL(PORT_D), D_ALL, NULL, dp_tp_ctl_mmio_write);
  1698. MMIO_DH(DP_TP_CTL(PORT_E), D_ALL, NULL, dp_tp_ctl_mmio_write);
  1699. MMIO_DH(DP_TP_STATUS(PORT_A), D_ALL, NULL, dp_tp_status_mmio_write);
  1700. MMIO_DH(DP_TP_STATUS(PORT_B), D_ALL, NULL, dp_tp_status_mmio_write);
  1701. MMIO_DH(DP_TP_STATUS(PORT_C), D_ALL, NULL, dp_tp_status_mmio_write);
  1702. MMIO_DH(DP_TP_STATUS(PORT_D), D_ALL, NULL, dp_tp_status_mmio_write);
  1703. MMIO_DH(DP_TP_STATUS(PORT_E), D_ALL, NULL, NULL);
  1704. MMIO_F(_DDI_BUF_TRANS_A, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
  1705. MMIO_F(0x64e60, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
  1706. MMIO_F(0x64eC0, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
  1707. MMIO_F(0x64f20, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
  1708. MMIO_F(0x64f80, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
  1709. MMIO_D(HSW_AUD_CFG(PIPE_A), D_ALL);
  1710. MMIO_D(HSW_AUD_PIN_ELD_CP_VLD, D_ALL);
  1711. MMIO_DH(_TRANS_DDI_FUNC_CTL_A, D_ALL, NULL, NULL);
  1712. MMIO_DH(_TRANS_DDI_FUNC_CTL_B, D_ALL, NULL, NULL);
  1713. MMIO_DH(_TRANS_DDI_FUNC_CTL_C, D_ALL, NULL, NULL);
  1714. MMIO_DH(_TRANS_DDI_FUNC_CTL_EDP, D_ALL, NULL, NULL);
  1715. MMIO_D(_TRANSA_MSA_MISC, D_ALL);
  1716. MMIO_D(_TRANSB_MSA_MISC, D_ALL);
  1717. MMIO_D(_TRANSC_MSA_MISC, D_ALL);
  1718. MMIO_D(_TRANS_EDP_MSA_MISC, D_ALL);
  1719. MMIO_DH(FORCEWAKE, D_ALL, NULL, NULL);
  1720. MMIO_D(FORCEWAKE_ACK, D_ALL);
  1721. MMIO_D(GEN6_GT_CORE_STATUS, D_ALL);
  1722. MMIO_D(GEN6_GT_THREAD_STATUS_REG, D_ALL);
  1723. MMIO_D(GTFIFODBG, D_ALL);
  1724. MMIO_D(GTFIFOCTL, D_ALL);
  1725. MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
  1726. MMIO_DH(FORCEWAKE_ACK_HSW, D_HSW | D_BDW, NULL, NULL);
  1727. MMIO_D(ECOBUS, D_ALL);
  1728. MMIO_DH(GEN6_RC_CONTROL, D_ALL, NULL, NULL);
  1729. MMIO_DH(GEN6_RC_STATE, D_ALL, NULL, NULL);
  1730. MMIO_D(GEN6_RPNSWREQ, D_ALL);
  1731. MMIO_D(GEN6_RC_VIDEO_FREQ, D_ALL);
  1732. MMIO_D(GEN6_RP_DOWN_TIMEOUT, D_ALL);
  1733. MMIO_D(GEN6_RP_INTERRUPT_LIMITS, D_ALL);
  1734. MMIO_D(GEN6_RPSTAT1, D_ALL);
  1735. MMIO_D(GEN6_RP_CONTROL, D_ALL);
  1736. MMIO_D(GEN6_RP_UP_THRESHOLD, D_ALL);
  1737. MMIO_D(GEN6_RP_DOWN_THRESHOLD, D_ALL);
  1738. MMIO_D(GEN6_RP_CUR_UP_EI, D_ALL);
  1739. MMIO_D(GEN6_RP_CUR_UP, D_ALL);
  1740. MMIO_D(GEN6_RP_PREV_UP, D_ALL);
  1741. MMIO_D(GEN6_RP_CUR_DOWN_EI, D_ALL);
  1742. MMIO_D(GEN6_RP_CUR_DOWN, D_ALL);
  1743. MMIO_D(GEN6_RP_PREV_DOWN, D_ALL);
  1744. MMIO_D(GEN6_RP_UP_EI, D_ALL);
  1745. MMIO_D(GEN6_RP_DOWN_EI, D_ALL);
  1746. MMIO_D(GEN6_RP_IDLE_HYSTERSIS, D_ALL);
  1747. MMIO_D(GEN6_RC1_WAKE_RATE_LIMIT, D_ALL);
  1748. MMIO_D(GEN6_RC6_WAKE_RATE_LIMIT, D_ALL);
  1749. MMIO_D(GEN6_RC6pp_WAKE_RATE_LIMIT, D_ALL);
  1750. MMIO_D(GEN6_RC_EVALUATION_INTERVAL, D_ALL);
  1751. MMIO_D(GEN6_RC_IDLE_HYSTERSIS, D_ALL);
  1752. MMIO_D(GEN6_RC_SLEEP, D_ALL);
  1753. MMIO_D(GEN6_RC1e_THRESHOLD, D_ALL);
  1754. MMIO_D(GEN6_RC6_THRESHOLD, D_ALL);
  1755. MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL);
  1756. MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL);
  1757. MMIO_D(GEN6_PMINTRMSK, D_ALL);
  1758. MMIO_DH(HSW_PWR_WELL_BIOS, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
  1759. MMIO_DH(HSW_PWR_WELL_DRIVER, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
  1760. MMIO_DH(HSW_PWR_WELL_KVMR, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
  1761. MMIO_DH(HSW_PWR_WELL_DEBUG, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
  1762. MMIO_DH(HSW_PWR_WELL_CTL5, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
  1763. MMIO_DH(HSW_PWR_WELL_CTL6, D_HSW | D_BDW, NULL, power_well_ctl_mmio_write);
  1764. MMIO_D(RSTDBYCTL, D_ALL);
  1765. MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write);
  1766. MMIO_F(FENCE_REG_GEN6_LO(0), 0x80, 0, 0, 0, D_ALL, fence_mmio_read, fence_mmio_write);
  1767. MMIO_F(VGT_PVINFO_PAGE, VGT_PVINFO_SIZE, F_UNALIGN, 0, 0, D_ALL, pvinfo_mmio_read, pvinfo_mmio_write);
  1768. MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write);
  1769. MMIO_F(MCHBAR_MIRROR_BASE_SNB, 0x40000, 0, 0, 0, D_ALL, NULL, NULL);
  1770. MMIO_D(TILECTL, D_ALL);
  1771. MMIO_D(GEN6_UCGCTL1, D_ALL);
  1772. MMIO_D(GEN6_UCGCTL2, D_ALL);
  1773. MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL);
  1774. MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_SKL);
  1775. MMIO_D(GEN6_PCODE_DATA, D_ALL);
  1776. MMIO_D(0x13812c, D_ALL);
  1777. MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
  1778. MMIO_D(HSW_EDRAM_CAP, D_ALL);
  1779. MMIO_D(HSW_IDICR, D_ALL);
  1780. MMIO_DH(GFX_FLSH_CNTL_GEN6, D_ALL, NULL, NULL);
  1781. MMIO_D(0x3c, D_ALL);
  1782. MMIO_D(0x860, D_ALL);
  1783. MMIO_D(ECOSKPD, D_ALL);
  1784. MMIO_D(0x121d0, D_ALL);
  1785. MMIO_D(GEN6_BLITTER_ECOSKPD, D_ALL);
  1786. MMIO_D(0x41d0, D_ALL);
  1787. MMIO_D(GAC_ECO_BITS, D_ALL);
  1788. MMIO_D(0x6200, D_ALL);
  1789. MMIO_D(0x6204, D_ALL);
  1790. MMIO_D(0x6208, D_ALL);
  1791. MMIO_D(0x7118, D_ALL);
  1792. MMIO_D(0x7180, D_ALL);
  1793. MMIO_D(0x7408, D_ALL);
  1794. MMIO_D(0x7c00, D_ALL);
  1795. MMIO_D(GEN6_MBCTL, D_ALL);
  1796. MMIO_D(0x911c, D_ALL);
  1797. MMIO_D(0x9120, D_ALL);
  1798. MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL);
  1799. MMIO_D(GAB_CTL, D_ALL);
  1800. MMIO_D(0x48800, D_ALL);
  1801. MMIO_D(0xce044, D_ALL);
  1802. MMIO_D(0xe6500, D_ALL);
  1803. MMIO_D(0xe6504, D_ALL);
  1804. MMIO_D(0xe6600, D_ALL);
  1805. MMIO_D(0xe6604, D_ALL);
  1806. MMIO_D(0xe6700, D_ALL);
  1807. MMIO_D(0xe6704, D_ALL);
  1808. MMIO_D(0xe6800, D_ALL);
  1809. MMIO_D(0xe6804, D_ALL);
  1810. MMIO_D(PCH_GMBUS4, D_ALL);
  1811. MMIO_D(PCH_GMBUS5, D_ALL);
  1812. MMIO_D(0x902c, D_ALL);
  1813. MMIO_D(0xec008, D_ALL);
  1814. MMIO_D(0xec00c, D_ALL);
  1815. MMIO_D(0xec008 + 0x18, D_ALL);
  1816. MMIO_D(0xec00c + 0x18, D_ALL);
  1817. MMIO_D(0xec008 + 0x18 * 2, D_ALL);
  1818. MMIO_D(0xec00c + 0x18 * 2, D_ALL);
  1819. MMIO_D(0xec008 + 0x18 * 3, D_ALL);
  1820. MMIO_D(0xec00c + 0x18 * 3, D_ALL);
  1821. MMIO_D(0xec408, D_ALL);
  1822. MMIO_D(0xec40c, D_ALL);
  1823. MMIO_D(0xec408 + 0x18, D_ALL);
  1824. MMIO_D(0xec40c + 0x18, D_ALL);
  1825. MMIO_D(0xec408 + 0x18 * 2, D_ALL);
  1826. MMIO_D(0xec40c + 0x18 * 2, D_ALL);
  1827. MMIO_D(0xec408 + 0x18 * 3, D_ALL);
  1828. MMIO_D(0xec40c + 0x18 * 3, D_ALL);
  1829. MMIO_D(0xfc810, D_ALL);
  1830. MMIO_D(0xfc81c, D_ALL);
  1831. MMIO_D(0xfc828, D_ALL);
  1832. MMIO_D(0xfc834, D_ALL);
  1833. MMIO_D(0xfcc00, D_ALL);
  1834. MMIO_D(0xfcc0c, D_ALL);
  1835. MMIO_D(0xfcc18, D_ALL);
  1836. MMIO_D(0xfcc24, D_ALL);
  1837. MMIO_D(0xfd000, D_ALL);
  1838. MMIO_D(0xfd00c, D_ALL);
  1839. MMIO_D(0xfd018, D_ALL);
  1840. MMIO_D(0xfd024, D_ALL);
  1841. MMIO_D(0xfd034, D_ALL);
  1842. MMIO_DH(FPGA_DBG, D_ALL, NULL, fpga_dbg_mmio_write);
  1843. MMIO_D(0x2054, D_ALL);
  1844. MMIO_D(0x12054, D_ALL);
  1845. MMIO_D(0x22054, D_ALL);
  1846. MMIO_D(0x1a054, D_ALL);
  1847. MMIO_D(0x44070, D_ALL);
  1848. MMIO_D(0x215c, D_HSW_PLUS);
  1849. MMIO_DFH(0x2178, D_ALL, F_CMD_ACCESS, NULL, NULL);
  1850. MMIO_DFH(0x217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
  1851. MMIO_DFH(0x12178, D_ALL, F_CMD_ACCESS, NULL, NULL);
  1852. MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
  1853. MMIO_F(0x2290, 8, 0, 0, 0, D_HSW_PLUS, NULL, NULL);
  1854. MMIO_D(GEN7_OACONTROL, D_HSW);
  1855. MMIO_D(0x2b00, D_BDW_PLUS);
  1856. MMIO_D(0x2360, D_BDW_PLUS);
  1857. MMIO_F(0x5200, 32, 0, 0, 0, D_ALL, NULL, NULL);
  1858. MMIO_F(0x5240, 32, 0, 0, 0, D_ALL, NULL, NULL);
  1859. MMIO_F(0x5280, 16, 0, 0, 0, D_ALL, NULL, NULL);
  1860. MMIO_DFH(0x1c17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
  1861. MMIO_DFH(0x1c178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
  1862. MMIO_D(BCS_SWCTRL, D_ALL);
  1863. MMIO_F(HS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
  1864. MMIO_F(DS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
  1865. MMIO_F(IA_VERTICES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
  1866. MMIO_F(IA_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
  1867. MMIO_F(VS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
  1868. MMIO_F(GS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
  1869. MMIO_F(GS_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
  1870. MMIO_F(CL_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
  1871. MMIO_F(CL_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
  1872. MMIO_F(PS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
  1873. MMIO_F(PS_DEPTH_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
  1874. MMIO_DH(0x4260, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
  1875. MMIO_DH(0x4264, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
  1876. MMIO_DH(0x4268, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
  1877. MMIO_DH(0x426c, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
  1878. MMIO_DH(0x4270, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
  1879. MMIO_DFH(0x4094, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
  1880. return 0;
  1881. }
  1882. static int init_broadwell_mmio_info(struct intel_gvt *gvt)
  1883. {
  1884. struct drm_i915_private *dev_priv = gvt->dev_priv;
  1885. int ret;
  1886. MMIO_DH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL,
  1887. intel_vgpu_reg_imr_handler);
  1888. MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
  1889. MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
  1890. MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
  1891. MMIO_D(GEN8_GT_ISR(0), D_BDW_PLUS);
  1892. MMIO_DH(GEN8_GT_IMR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
  1893. MMIO_DH(GEN8_GT_IER(1), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
  1894. MMIO_DH(GEN8_GT_IIR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
  1895. MMIO_D(GEN8_GT_ISR(1), D_BDW_PLUS);
  1896. MMIO_DH(GEN8_GT_IMR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
  1897. MMIO_DH(GEN8_GT_IER(2), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
  1898. MMIO_DH(GEN8_GT_IIR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
  1899. MMIO_D(GEN8_GT_ISR(2), D_BDW_PLUS);
  1900. MMIO_DH(GEN8_GT_IMR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
  1901. MMIO_DH(GEN8_GT_IER(3), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
  1902. MMIO_DH(GEN8_GT_IIR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
  1903. MMIO_D(GEN8_GT_ISR(3), D_BDW_PLUS);
  1904. MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_A), D_BDW_PLUS, NULL,
  1905. intel_vgpu_reg_imr_handler);
  1906. MMIO_DH(GEN8_DE_PIPE_IER(PIPE_A), D_BDW_PLUS, NULL,
  1907. intel_vgpu_reg_ier_handler);
  1908. MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_A), D_BDW_PLUS, NULL,
  1909. intel_vgpu_reg_iir_handler);
  1910. MMIO_D(GEN8_DE_PIPE_ISR(PIPE_A), D_BDW_PLUS);
  1911. MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_B), D_BDW_PLUS, NULL,
  1912. intel_vgpu_reg_imr_handler);
  1913. MMIO_DH(GEN8_DE_PIPE_IER(PIPE_B), D_BDW_PLUS, NULL,
  1914. intel_vgpu_reg_ier_handler);
  1915. MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_B), D_BDW_PLUS, NULL,
  1916. intel_vgpu_reg_iir_handler);
  1917. MMIO_D(GEN8_DE_PIPE_ISR(PIPE_B), D_BDW_PLUS);
  1918. MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_C), D_BDW_PLUS, NULL,
  1919. intel_vgpu_reg_imr_handler);
  1920. MMIO_DH(GEN8_DE_PIPE_IER(PIPE_C), D_BDW_PLUS, NULL,
  1921. intel_vgpu_reg_ier_handler);
  1922. MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_C), D_BDW_PLUS, NULL,
  1923. intel_vgpu_reg_iir_handler);
  1924. MMIO_D(GEN8_DE_PIPE_ISR(PIPE_C), D_BDW_PLUS);
  1925. MMIO_DH(GEN8_DE_PORT_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
  1926. MMIO_DH(GEN8_DE_PORT_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
  1927. MMIO_DH(GEN8_DE_PORT_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
  1928. MMIO_D(GEN8_DE_PORT_ISR, D_BDW_PLUS);
  1929. MMIO_DH(GEN8_DE_MISC_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
  1930. MMIO_DH(GEN8_DE_MISC_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
  1931. MMIO_DH(GEN8_DE_MISC_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
  1932. MMIO_D(GEN8_DE_MISC_ISR, D_BDW_PLUS);
  1933. MMIO_DH(GEN8_PCU_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
  1934. MMIO_DH(GEN8_PCU_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
  1935. MMIO_DH(GEN8_PCU_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
  1936. MMIO_D(GEN8_PCU_ISR, D_BDW_PLUS);
  1937. MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
  1938. intel_vgpu_reg_master_irq_handler);
  1939. MMIO_D(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
  1940. MMIO_D(0x1c134, D_BDW_PLUS);
  1941. MMIO_D(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
  1942. MMIO_D(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
  1943. MMIO_GM(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
  1944. MMIO_D(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
  1945. MMIO_D(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
  1946. MMIO_D(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
  1947. MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK, NULL, ring_mode_mmio_write);
  1948. MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK,
  1949. NULL, NULL);
  1950. MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK,
  1951. NULL, NULL);
  1952. MMIO_DFH(RING_TIMESTAMP(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
  1953. ring_timestamp_mmio_read, NULL);
  1954. MMIO_RING_D(RING_ACTHD_UDW, D_BDW_PLUS);
  1955. #define RING_REG(base) (base + 0xd0)
  1956. MMIO_RING_F(RING_REG, 4, F_RO, 0,
  1957. ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
  1958. ring_reset_ctl_write);
  1959. MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0,
  1960. ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
  1961. ring_reset_ctl_write);
  1962. #undef RING_REG
  1963. #define RING_REG(base) (base + 0x230)
  1964. MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write);
  1965. MMIO_DH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, elsp_mmio_write);
  1966. #undef RING_REG
  1967. #define RING_REG(base) (base + 0x234)
  1968. MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
  1969. MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0, ~0LL, D_BDW_PLUS, NULL, NULL);
  1970. #undef RING_REG
  1971. #define RING_REG(base) (base + 0x244)
  1972. MMIO_RING_D(RING_REG, D_BDW_PLUS);
  1973. MMIO_D(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
  1974. #undef RING_REG
  1975. #define RING_REG(base) (base + 0x370)
  1976. MMIO_RING_F(RING_REG, 48, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
  1977. MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 48, F_RO, 0, ~0, D_BDW_PLUS,
  1978. NULL, NULL);
  1979. #undef RING_REG
  1980. #define RING_REG(base) (base + 0x3a0)
  1981. MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
  1982. MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
  1983. #undef RING_REG
  1984. MMIO_D(PIPEMISC(PIPE_A), D_BDW_PLUS);
  1985. MMIO_D(PIPEMISC(PIPE_B), D_BDW_PLUS);
  1986. MMIO_D(PIPEMISC(PIPE_C), D_BDW_PLUS);
  1987. MMIO_D(0x1c1d0, D_BDW_PLUS);
  1988. MMIO_D(GEN6_MBCUNIT_SNPCR, D_BDW_PLUS);
  1989. MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS);
  1990. MMIO_D(0x1c054, D_BDW_PLUS);
  1991. MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS);
  1992. MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS);
  1993. MMIO_D(GAMTARBMODE, D_BDW_PLUS);
  1994. #define RING_REG(base) (base + 0x270)
  1995. MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
  1996. MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
  1997. #undef RING_REG
  1998. MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
  1999. MMIO_GM(0x1c080, D_BDW_PLUS, NULL, NULL);
  2000. MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
  2001. MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW);
  2002. MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW);
  2003. MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW);
  2004. MMIO_D(WM_MISC, D_BDW);
  2005. MMIO_D(BDW_EDP_PSR_BASE, D_BDW);
  2006. MMIO_D(0x66c00, D_BDW_PLUS);
  2007. MMIO_D(0x66c04, D_BDW_PLUS);
  2008. MMIO_D(HSW_GTT_CACHE_EN, D_BDW_PLUS);
  2009. MMIO_D(GEN8_EU_DISABLE0, D_BDW_PLUS);
  2010. MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS);
  2011. MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS);
  2012. MMIO_D(0xfdc, D_BDW);
  2013. MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
  2014. MMIO_D(GEN7_ROW_CHICKEN2, D_BDW_PLUS);
  2015. MMIO_D(GEN8_UCGCTL6, D_BDW_PLUS);
  2016. MMIO_D(0xb1f0, D_BDW);
  2017. MMIO_D(0xb1c0, D_BDW);
  2018. MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
  2019. MMIO_D(0xb100, D_BDW);
  2020. MMIO_D(0xb10c, D_BDW);
  2021. MMIO_D(0xb110, D_BDW);
  2022. MMIO_DFH(0x24d0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
  2023. MMIO_DFH(0x24d4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
  2024. MMIO_DFH(0x24d8, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
  2025. MMIO_DFH(0x24dc, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
  2026. MMIO_D(0x83a4, D_BDW);
  2027. MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS);
  2028. MMIO_D(0x8430, D_BDW);
  2029. MMIO_D(0x110000, D_BDW_PLUS);
  2030. MMIO_D(0x48400, D_BDW_PLUS);
  2031. MMIO_D(0x6e570, D_BDW_PLUS);
  2032. MMIO_D(0x65f10, D_BDW_PLUS);
  2033. MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
  2034. MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
  2035. MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
  2036. MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
  2037. MMIO_D(0x2248, D_BDW);
  2038. return 0;
  2039. }
/*
 * Register the Skylake-specific MMIO handler entries.
 *
 * NOTE(review): the MMIO_* macros appear to expand to registration calls
 * that consume @dev_priv and @ret and bail out on failure — confirm against
 * the macro definitions earlier in this file.  Raw hex offsets are registers
 * without an i915 #define at the time of writing.
 */
static int init_skl_mmio_info(struct intel_gvt *gvt)
{
	struct drm_i915_private *dev_priv = gvt->dev_priv;
	int ret;

	/* GEN9 per-engine forcewake request/ack registers; writes to the
	 * request registers are trapped by mul_force_wake_write. */
	MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
	MMIO_DH(FORCEWAKE_ACK_RENDER_GEN9, D_SKL_PLUS, NULL, NULL);
	MMIO_DH(FORCEWAKE_BLITTER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
	MMIO_DH(FORCEWAKE_ACK_BLITTER_GEN9, D_SKL_PLUS, NULL, NULL);
	MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
	MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);

	/* DP AUX channel control blocks (6 dwords each), write-trapped. */
	MMIO_F(_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write);
	MMIO_F(_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write);
	MMIO_F(_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write);

	/* Display power wells and PCU mailbox. */
	MMIO_D(HSW_PWR_WELL_BIOS, D_SKL);
	MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL, NULL, skl_power_well_ctl_write);
	MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL, NULL, mailbox_write);

	MMIO_D(0xa210, D_SKL_PLUS);
	MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
	MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
	MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
	MMIO_DH(0x4ddc, D_SKL, NULL, skl_misc_ctl_write);
	MMIO_DH(0x42080, D_SKL, NULL, skl_misc_ctl_write);
	MMIO_D(0x45504, D_SKL);
	MMIO_D(0x45520, D_SKL);
	MMIO_D(0x46000, D_SKL);

	/* LCPLL controls trap through skl_lcpll_write. */
	MMIO_DH(0x46010, D_SKL, NULL, skl_lcpll_write);
	MMIO_DH(0x46014, D_SKL, NULL, skl_lcpll_write);
	MMIO_D(0x6C040, D_SKL);
	MMIO_D(0x6C048, D_SKL);
	MMIO_D(0x6C050, D_SKL);
	MMIO_D(0x6C044, D_SKL);
	MMIO_D(0x6C04C, D_SKL);
	MMIO_D(0x6C054, D_SKL);
	MMIO_D(0x6c058, D_SKL);
	MMIO_D(0x6c05c, D_SKL);
	MMIO_DH(0X6c060, D_SKL, dpll_status_read, NULL);

	/* Pipe scaler window position/size/control; writes go to pf_write. */
	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL, NULL, pf_write);
	MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL, NULL, pf_write);
	MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL, NULL, pf_write);
	MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL, NULL, pf_write);
	MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL, NULL, pf_write);
	MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL, NULL, pf_write);
	MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL, NULL, pf_write);
	MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL, NULL, pf_write);

	/* Display buffer (DDB) allocation per plane/cursor. */
	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL, NULL, NULL);
	MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL, NULL, NULL);
	MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL, NULL, NULL);
	MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL, NULL, NULL);
	MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL, NULL, NULL);

	/* Watermark register blocks (8 levels x 4 bytes per plane). */
	MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
	MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
	MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
	MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
	MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
	MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
	MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
	MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
	MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
	MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
	MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
	MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);

	/* Transition watermarks. */
	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL, NULL, NULL);
	MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL, NULL, NULL);
	MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL, NULL, NULL);
	MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL, NULL, NULL);
	MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL, NULL, NULL);

	/* NV12 (planar) buffer allocation. */
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL, NULL, NULL);
	MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL, NULL, NULL);

	/* Per-plane 0x701c0/0x701c4 family registers. */
	MMIO_DH(_REG_701C0(PIPE_A, 1), D_SKL, NULL, NULL);
	MMIO_DH(_REG_701C0(PIPE_A, 2), D_SKL, NULL, NULL);
	MMIO_DH(_REG_701C0(PIPE_A, 3), D_SKL, NULL, NULL);
	MMIO_DH(_REG_701C0(PIPE_A, 4), D_SKL, NULL, NULL);
	MMIO_DH(_REG_701C0(PIPE_B, 1), D_SKL, NULL, NULL);
	MMIO_DH(_REG_701C0(PIPE_B, 2), D_SKL, NULL, NULL);
	MMIO_DH(_REG_701C0(PIPE_B, 3), D_SKL, NULL, NULL);
	MMIO_DH(_REG_701C0(PIPE_B, 4), D_SKL, NULL, NULL);
	MMIO_DH(_REG_701C0(PIPE_C, 1), D_SKL, NULL, NULL);
	MMIO_DH(_REG_701C0(PIPE_C, 2), D_SKL, NULL, NULL);
	MMIO_DH(_REG_701C0(PIPE_C, 3), D_SKL, NULL, NULL);
	MMIO_DH(_REG_701C0(PIPE_C, 4), D_SKL, NULL, NULL);
	MMIO_DH(_REG_701C4(PIPE_A, 1), D_SKL, NULL, NULL);
	MMIO_DH(_REG_701C4(PIPE_A, 2), D_SKL, NULL, NULL);
	MMIO_DH(_REG_701C4(PIPE_A, 3), D_SKL, NULL, NULL);
	MMIO_DH(_REG_701C4(PIPE_A, 4), D_SKL, NULL, NULL);
	MMIO_DH(_REG_701C4(PIPE_B, 1), D_SKL, NULL, NULL);
	MMIO_DH(_REG_701C4(PIPE_B, 2), D_SKL, NULL, NULL);
	MMIO_DH(_REG_701C4(PIPE_B, 3), D_SKL, NULL, NULL);
	MMIO_DH(_REG_701C4(PIPE_B, 4), D_SKL, NULL, NULL);
	MMIO_DH(_REG_701C4(PIPE_C, 1), D_SKL, NULL, NULL);
	MMIO_DH(_REG_701C4(PIPE_C, 2), D_SKL, NULL, NULL);
	MMIO_DH(_REG_701C4(PIPE_C, 3), D_SKL, NULL, NULL);
	MMIO_DH(_REG_701C4(PIPE_C, 4), D_SKL, NULL, NULL);

	MMIO_D(0x70380, D_SKL);
	MMIO_D(0x71380, D_SKL);
	MMIO_D(0x72380, D_SKL);
	MMIO_D(0x7039c, D_SKL);

	/* 0x3000-byte register range starting at 0x80000. */
	MMIO_F(0x80000, 0x3000, 0, 0, 0, D_SKL, NULL, NULL);
	MMIO_D(0x8f074, D_SKL);
	MMIO_D(0x8f004, D_SKL);
	MMIO_D(0x8f034, D_SKL);
	MMIO_D(0xb11c, D_SKL);
	MMIO_D(0x51000, D_SKL);
	MMIO_D(0x6c00c, D_SKL);

	/* Command-accessible ranges. */
	MMIO_F(0xc800, 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL);
	MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL);
	MMIO_D(0xd08, D_SKL);
	MMIO_D(0x20e0, D_SKL);
	MMIO_D(0x20ec, D_SKL);

	/* TRTT */
	MMIO_D(0x4de0, D_SKL);
	MMIO_D(0x4de4, D_SKL);
	MMIO_D(0x4de8, D_SKL);
	MMIO_D(0x4dec, D_SKL);
	MMIO_D(0x4df0, D_SKL);
	MMIO_DH(0x4df4, D_SKL, NULL, gen9_trtte_write);
	MMIO_DH(0x4dfc, D_SKL, NULL, gen9_trtt_chicken_write);

	MMIO_D(0x45008, D_SKL);
	MMIO_D(0x46430, D_SKL);
	MMIO_D(0x46520, D_SKL);
	MMIO_D(0xc403c, D_SKL);
	MMIO_D(0xb004, D_SKL);
	MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
	MMIO_D(0x65900, D_SKL);
	MMIO_D(0x1082c0, D_SKL);
	MMIO_D(0x4068, D_SKL);
	MMIO_D(0x67054, D_SKL);
	MMIO_D(0x6e560, D_SKL);
	MMIO_D(0x6e554, D_SKL);
	MMIO_D(0x2b20, D_SKL);
	MMIO_D(0x65f00, D_SKL);
	MMIO_D(0x65f08, D_SKL);
	MMIO_D(0x320f0, D_SKL);
	MMIO_D(_REG_VCS2_EXCC, D_SKL);
	MMIO_D(0x70034, D_SKL);
	MMIO_D(0x71034, D_SKL);
	MMIO_D(0x72034, D_SKL);

	/* Plane color-key value/mask registers. */
	MMIO_D(_PLANE_KEYVAL_1(PIPE_A), D_SKL);
	MMIO_D(_PLANE_KEYVAL_1(PIPE_B), D_SKL);
	MMIO_D(_PLANE_KEYVAL_1(PIPE_C), D_SKL);
	MMIO_D(_PLANE_KEYMSK_1(PIPE_A), D_SKL);
	MMIO_D(_PLANE_KEYMSK_1(PIPE_B), D_SKL);
	MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL);
	MMIO_D(0x44500, D_SKL);
	return 0;
}
  2222. /**
  2223. * intel_gvt_find_mmio_info - find MMIO information entry by aligned offset
  2224. * @gvt: GVT device
  2225. * @offset: register offset
  2226. *
  2227. * This function is used to find the MMIO information entry from hash table
  2228. *
  2229. * Returns:
  2230. * pointer to MMIO information entry, NULL if not exists
  2231. */
  2232. struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
  2233. unsigned int offset)
  2234. {
  2235. struct intel_gvt_mmio_info *e;
  2236. WARN_ON(!IS_ALIGNED(offset, 4));
  2237. hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) {
  2238. if (e->offset == offset)
  2239. return e;
  2240. }
  2241. return NULL;
  2242. }
  2243. /**
  2244. * intel_gvt_clean_mmio_info - clean up MMIO information table for GVT device
  2245. * @gvt: GVT device
  2246. *
  2247. * This function is called at the driver unloading stage, to clean up the MMIO
  2248. * information table of GVT device
  2249. *
  2250. */
  2251. void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
  2252. {
  2253. struct hlist_node *tmp;
  2254. struct intel_gvt_mmio_info *e;
  2255. int i;
  2256. hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node)
  2257. kfree(e);
  2258. vfree(gvt->mmio.mmio_attribute);
  2259. gvt->mmio.mmio_attribute = NULL;
  2260. }
  2261. /**
  2262. * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
  2263. * @gvt: GVT device
  2264. *
  2265. * This function is called at the initialization stage, to setup the MMIO
  2266. * information table for GVT device
  2267. *
  2268. * Returns:
  2269. * zero on success, negative if failed.
  2270. */
  2271. int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
  2272. {
  2273. struct intel_gvt_device_info *info = &gvt->device_info;
  2274. struct drm_i915_private *dev_priv = gvt->dev_priv;
  2275. int ret;
  2276. gvt->mmio.mmio_attribute = vzalloc(info->mmio_size);
  2277. if (!gvt->mmio.mmio_attribute)
  2278. return -ENOMEM;
  2279. ret = init_generic_mmio_info(gvt);
  2280. if (ret)
  2281. goto err;
  2282. if (IS_BROADWELL(dev_priv)) {
  2283. ret = init_broadwell_mmio_info(gvt);
  2284. if (ret)
  2285. goto err;
  2286. } else if (IS_SKYLAKE(dev_priv)) {
  2287. ret = init_broadwell_mmio_info(gvt);
  2288. if (ret)
  2289. goto err;
  2290. ret = init_skl_mmio_info(gvt);
  2291. if (ret)
  2292. goto err;
  2293. }
  2294. return 0;
  2295. err:
  2296. intel_gvt_clean_mmio_info(gvt);
  2297. return ret;
  2298. }
  2299. /**
  2300. * intel_gvt_mmio_set_accessed - mark a MMIO has been accessed
  2301. * @gvt: a GVT device
  2302. * @offset: register offset
  2303. *
  2304. */
  2305. void intel_gvt_mmio_set_accessed(struct intel_gvt *gvt, unsigned int offset)
  2306. {
  2307. gvt->mmio.mmio_attribute[offset >> 2] |=
  2308. F_ACCESSED;
  2309. }
  2310. /**
  2311. * intel_gvt_mmio_is_cmd_accessed - mark a MMIO could be accessed by command
  2312. * @gvt: a GVT device
  2313. * @offset: register offset
  2314. *
  2315. */
  2316. bool intel_gvt_mmio_is_cmd_access(struct intel_gvt *gvt,
  2317. unsigned int offset)
  2318. {
  2319. return gvt->mmio.mmio_attribute[offset >> 2] &
  2320. F_CMD_ACCESS;
  2321. }
  2322. /**
  2323. * intel_gvt_mmio_is_unalign - mark a MMIO could be accessed unaligned
  2324. * @gvt: a GVT device
  2325. * @offset: register offset
  2326. *
  2327. */
  2328. bool intel_gvt_mmio_is_unalign(struct intel_gvt *gvt,
  2329. unsigned int offset)
  2330. {
  2331. return gvt->mmio.mmio_attribute[offset >> 2] &
  2332. F_UNALIGN;
  2333. }
  2334. /**
  2335. * intel_gvt_mmio_set_cmd_accessed - mark a MMIO has been accessed by command
  2336. * @gvt: a GVT device
  2337. * @offset: register offset
  2338. *
  2339. */
  2340. void intel_gvt_mmio_set_cmd_accessed(struct intel_gvt *gvt,
  2341. unsigned int offset)
  2342. {
  2343. gvt->mmio.mmio_attribute[offset >> 2] |=
  2344. F_CMD_ACCESSED;
  2345. }
  2346. /**
  2347. * intel_gvt_mmio_has_mode_mask - if a MMIO has a mode mask
  2348. * @gvt: a GVT device
  2349. * @offset: register offset
  2350. *
  2351. * Returns:
  2352. * True if a MMIO has a mode mask in its higher 16 bits, false if it isn't.
  2353. *
  2354. */
  2355. bool intel_gvt_mmio_has_mode_mask(struct intel_gvt *gvt, unsigned int offset)
  2356. {
  2357. return gvt->mmio.mmio_attribute[offset >> 2] &
  2358. F_MODE_MASK;
  2359. }
  2360. /**
  2361. * intel_vgpu_default_mmio_read - default MMIO read handler
  2362. * @vgpu: a vGPU
  2363. * @offset: access offset
  2364. * @p_data: data return buffer
  2365. * @bytes: access data length
  2366. *
  2367. * Returns:
  2368. * Zero on success, negative error code if failed.
  2369. */
  2370. int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
  2371. void *p_data, unsigned int bytes)
  2372. {
  2373. read_vreg(vgpu, offset, p_data, bytes);
  2374. return 0;
  2375. }
  2376. /**
  2377. * intel_t_default_mmio_write - default MMIO write handler
  2378. * @vgpu: a vGPU
  2379. * @offset: access offset
  2380. * @p_data: write data buffer
  2381. * @bytes: access data length
  2382. *
  2383. * Returns:
  2384. * Zero on success, negative error code if failed.
  2385. */
  2386. int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
  2387. void *p_data, unsigned int bytes)
  2388. {
  2389. write_vreg(vgpu, offset, p_data, bytes);
  2390. return 0;
  2391. }