mmio.c

/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Ke Yu
 *    Kevin Tian <kevin.tian@intel.com>
 *    Dexuan Cui
 *
 * Contributors:
 *    Tina Zhang <tina.zhang@intel.com>
 *    Min He <min.he@intel.com>
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */
#include "i915_drv.h"
#include "gvt.h"

/**
 * intel_vgpu_gpa_to_mmio_offset - translate a GPA to MMIO offset
 * @vgpu: a vGPU
 * @gpa: guest physical address
 *
 * Returns:
 * The MMIO offset of the given GPA, relative to the base of the vGPU's
 * MMIO BAR (BAR0).
 */
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
{
	u64 gttmmio_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);

	return gpa - gttmmio_gpa;
}
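
/*
 * Example (the BAR address here is hypothetical): if the guest sees BAR0
 * (GTTMMADR) at GPA 0xf0000000, a trapped access to GPA 0xf0002000 yields
 * MMIO offset 0x2000. Offsets below device_info.mmio_size are handled as
 * register MMIO, while offsets falling inside the GTT range are handled
 * as GGTT accesses (see the reg_is_mmio()/reg_is_gtt() checks below).
 */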

#define reg_is_mmio(gvt, reg)  \
	(reg >= 0 && reg < gvt->device_info.mmio_size)

#define reg_is_gtt(gvt, reg)   \
	(reg >= gvt->device_info.gtt_start_offset \
	 && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))

/* In failsafe mode, skip the per-register handlers: plain MMIO goes through
 * the default read/write helpers and GGTT accesses are copied directly
 * from/to the virtual GGTT.
 */
static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
		void *p_data, unsigned int bytes, bool read)
{
	struct intel_gvt *gvt = NULL;
	void *pt = NULL;
	unsigned int offset = 0;

	if (!vgpu || !p_data)
		return;

	gvt = vgpu->gvt;
	mutex_lock(&vgpu->vgpu_lock);
	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
	if (reg_is_mmio(gvt, offset)) {
		if (read)
			intel_vgpu_default_mmio_read(vgpu, offset, p_data,
						     bytes);
		else
			intel_vgpu_default_mmio_write(vgpu, offset, p_data,
						      bytes);
	} else if (reg_is_gtt(gvt, offset)) {
		offset -= gvt->device_info.gtt_start_offset;
		pt = vgpu->gtt.ggtt_mm->ggtt_mm.virtual_ggtt + offset;
		if (read)
			memcpy(p_data, pt, bytes);
		else
			memcpy(pt, p_data, bytes);
	}
	mutex_unlock(&vgpu->vgpu_lock);
}

/**
 * intel_vgpu_emulate_mmio_read - emulate MMIO read
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: data return buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
		void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	unsigned int offset = 0;
	int ret = -EINVAL;

	if (vgpu->failsafe) {
		failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
		return 0;
	}

	mutex_lock(&vgpu->vgpu_lock);

	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

	if (WARN_ON(bytes > 8))
		goto err;

	if (reg_is_gtt(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
			goto err;
		if (WARN_ON(bytes != 4 && bytes != 8))
			goto err;
		if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
			goto err;

		ret = intel_vgpu_emulate_ggtt_mmio_read(vgpu, offset,
				p_data, bytes);
		if (ret)
			goto err;
		goto out;
	}

	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
		/* Outside both the MMIO and GTT ranges: treat it as a plain
		 * guest memory read.
		 */
		ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
		goto out;
	}

	if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
		goto err;

	if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, bytes)))
			goto err;
	}

	ret = intel_vgpu_mmio_reg_rw(vgpu, offset, p_data, bytes, true);
	if (ret < 0)
		goto err;

	intel_gvt_mmio_set_accessed(gvt, offset);
	ret = 0;
	goto out;
err:
	gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
			offset, bytes);
out:
	mutex_unlock(&vgpu->vgpu_lock);
	return ret;
}

/**
 * intel_vgpu_emulate_mmio_write - emulate MMIO write
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: write data buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
		void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	unsigned int offset = 0;
	int ret = -EINVAL;

	if (vgpu->failsafe) {
		failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, false);
		return 0;
	}

	mutex_lock(&vgpu->vgpu_lock);

	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

	if (WARN_ON(bytes > 8))
		goto err;

	if (reg_is_gtt(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
			goto err;
		if (WARN_ON(bytes != 4 && bytes != 8))
			goto err;
		if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
			goto err;

		ret = intel_vgpu_emulate_ggtt_mmio_write(vgpu, offset,
				p_data, bytes);
		if (ret)
			goto err;
		goto out;
	}

	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
		ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
		goto out;
	}

	ret = intel_vgpu_mmio_reg_rw(vgpu, offset, p_data, bytes, false);
	if (ret < 0)
		goto err;

	intel_gvt_mmio_set_accessed(gvt, offset);
	ret = 0;
	goto out;
err:
	gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
		     bytes);
out:
	mutex_unlock(&vgpu->vgpu_lock);
	return ret;
}
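
/*
 * Usage sketch (the handler name and its signature are hypothetical, for
 * illustration only): a hypervisor backend that traps a guest access to
 * the vGPU's BAR0 would dispatch it roughly like this:
 *
 *	static int handle_bar0_access(struct intel_vgpu *vgpu, u64 gpa,
 *				      void *buf, unsigned int len,
 *				      bool is_write)
 *	{
 *		if (is_write)
 *			return intel_vgpu_emulate_mmio_write(vgpu, gpa,
 *							     buf, len);
 *		return intel_vgpu_emulate_mmio_read(vgpu, gpa, buf, len);
 *	}
 */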

/**
 * intel_vgpu_reset_mmio - reset virtual MMIO space
 * @vgpu: a vGPU
 * @dmlr: whether this is a device model level reset
 */
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	void *mmio = gvt->firmware.mmio;

	if (dmlr) {
		memcpy(vgpu->mmio.vreg, mmio, info->mmio_size);
		memcpy(vgpu->mmio.sreg, mmio, info->mmio_size);

		vgpu_vreg_t(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;

		/* set bits 0:2 (Core C-State) to C0 */
		vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;

		if (IS_BROXTON(vgpu->gvt->dev_priv)) {
			vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &=
				    ~(BIT(0) | BIT(1));
			vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
				    ~PHY_POWER_GOOD;
			vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
				    ~PHY_POWER_GOOD;
			vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) &=
				    ~BIT(30);
			vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) &=
				    ~BIT(30);
			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) &=
				    ~BXT_PHY_LANE_ENABLED;
			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) |=
				    BXT_PHY_CMNLANE_POWERDOWN_ACK |
				    BXT_PHY_LANE_POWERDOWN_ACK;
			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) &=
				    ~BXT_PHY_LANE_ENABLED;
			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) |=
				    BXT_PHY_CMNLANE_POWERDOWN_ACK |
				    BXT_PHY_LANE_POWERDOWN_ACK;
			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) &=
				    ~BXT_PHY_LANE_ENABLED;
			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |=
				    BXT_PHY_CMNLANE_POWERDOWN_ACK |
				    BXT_PHY_LANE_POWERDOWN_ACK;
		}
	} else {
#define GVT_GEN8_MMIO_RESET_OFFSET		(0x44200)
		/* only reset the engine-related MMIO, so start at 0x44200;
		 * the interrupt (including DE) and display-related MMIO will
		 * not be touched
		 */
		memcpy(vgpu->mmio.vreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
		memcpy(vgpu->mmio.sreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
	}
}

/**
 * intel_vgpu_init_mmio - init MMIO space
 * @vgpu: a vGPU
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;

	/* vreg and sreg share a single allocation; sreg starts at the
	 * second half, mmio_size bytes after vreg.
	 */
	vgpu->mmio.vreg = vzalloc(array_size(info->mmio_size, 2));
	if (!vgpu->mmio.vreg)
		return -ENOMEM;

	vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;

	intel_vgpu_reset_mmio(vgpu, true);

	return 0;
}

/**
 * intel_vgpu_clean_mmio - clean MMIO space
 * @vgpu: a vGPU
 *
 */
void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu)
{
	vfree(vgpu->mmio.vreg);
	vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
}