mmio.c

/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Ke Yu
 *    Kevin Tian <kevin.tian@intel.com>
 *    Dexuan Cui
 *
 * Contributors:
 *    Tina Zhang <tina.zhang@intel.com>
 *    Min He <min.he@intel.com>
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */
#include "i915_drv.h"
#include "gvt.h"

/**
 * intel_vgpu_gpa_to_mmio_offset - translate a GPA to MMIO offset
 * @vgpu: a vGPU
 * @gpa: guest physical address
 *
 * Returns:
 * The MMIO offset of the given GPA
 */
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
{
	u64 gttmmio_gpa = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0) &
			  ~GENMASK(3, 0);
	return gpa - gttmmio_gpa;
}

#define reg_is_mmio(gvt, reg)  \
	(reg >= 0 && reg < gvt->device_info.mmio_size)

#define reg_is_gtt(gvt, reg)   \
	(reg >= gvt->device_info.gtt_start_offset \
	 && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))

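/*
 * Failsafe emulation: once a vGPU has entered failsafe mode, serve MMIO
 * accesses with the minimum amount of state. Plain MMIO registers go
 * through the default read/write handlers, GGTT-range accesses operate
 * directly on the virtual page table memory, and any remaining
 * write-protected guest pages have their protection dropped before the
 * access is forwarded.
 */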
static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
		void *p_data, unsigned int bytes, bool read)
{
	struct intel_gvt *gvt = NULL;
	void *pt = NULL;
	unsigned int offset = 0;

	if (!vgpu || !p_data)
		return;

	gvt = vgpu->gvt;
	mutex_lock(&gvt->lock);
	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
	if (reg_is_mmio(gvt, offset)) {
		if (read)
			intel_vgpu_default_mmio_read(vgpu, offset, p_data,
						     bytes);
		else
			intel_vgpu_default_mmio_write(vgpu, offset, p_data,
						      bytes);
	} else if (reg_is_gtt(gvt, offset) &&
		   vgpu->gtt.ggtt_mm->virtual_page_table) {
		offset -= gvt->device_info.gtt_start_offset;
		pt = vgpu->gtt.ggtt_mm->virtual_page_table + offset;
		if (read)
			memcpy(p_data, pt, bytes);
		else
			memcpy(pt, p_data, bytes);
	} else if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
		struct intel_vgpu_guest_page *gp;

		/* Since we enter the failsafe mode early during guest boot,
		 * the guest may not have had a chance to set up its ppgtt
		 * table, so there should not be any wp pages for the guest.
		 * Keep the wp related code here in case we need to handle
		 * it in the future.
		 */
		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
		if (gp) {
			/* remove write protection to prevent future traps */
			intel_vgpu_clean_guest_page(vgpu, gp);
			if (read)
				intel_gvt_hypervisor_read_gpa(vgpu, pa,
							      p_data, bytes);
			else
				intel_gvt_hypervisor_write_gpa(vgpu, pa,
							       p_data, bytes);
		}
	}
	mutex_unlock(&gvt->lock);
}

/**
 * intel_vgpu_emulate_mmio_read - emulate MMIO read
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: data return buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
		void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_mmio_info *mmio;
	unsigned int offset = 0;
	int ret = -EINVAL;

	if (vgpu->failsafe) {
		failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
		return 0;
	}
	mutex_lock(&gvt->lock);
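	/*
	 * A read that hits a write-protected guest page is a plain guest
	 * memory access, not a register access: forward it to the
	 * hypervisor's GPA read path.
	 */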
	if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
		struct intel_vgpu_guest_page *gp;

		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
		if (gp) {
			ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
							    p_data, bytes);
			if (ret) {
				gvt_vgpu_err("guest page read error %d, "
					"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
					ret, gp->gfn, pa, *(u32 *)p_data,
					bytes);
			}
			mutex_unlock(&gvt->lock);
			return ret;
		}
	}

	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

	if (WARN_ON(bytes > 8))
		goto err;
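	/*
	 * GGTT-range accesses must be 4 or 8 bytes wide, at least 4-byte
	 * aligned, and must stay entirely within the GTT range.
	 */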
	if (reg_is_gtt(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
			goto err;
		if (WARN_ON(bytes != 4 && bytes != 8))
			goto err;
		if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
			goto err;

		ret = intel_vgpu_emulate_gtt_mmio_read(vgpu, offset,
				p_data, bytes);
		if (ret)
			goto err;
		mutex_unlock(&gvt->lock);
		return ret;
	}

	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
		ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
		mutex_unlock(&gvt->lock);
		return ret;
	}

	if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
		goto err;

	if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, bytes)))
			goto err;
	}
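	/*
	 * Tracked registers are registered at 4-byte-aligned offsets, so
	 * round the access offset down before looking up its handler.
	 */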
	mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
	if (mmio) {
		if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
			if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
				goto err;
			if (WARN_ON(mmio->offset != offset))
				goto err;
		}
		ret = mmio->read(vgpu, offset, p_data, bytes);
	} else {
		ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);

		if (!vgpu->mmio.disable_warn_untrack) {
			gvt_vgpu_err("read untracked MMIO %x(%dB) val %x\n",
				offset, bytes, *(u32 *)p_data);

			if (offset == 0x206c) {
				gvt_vgpu_err("------------------------------------------\n");
				gvt_vgpu_err("likely triggers a gfx reset\n");
				gvt_vgpu_err("------------------------------------------\n");
				vgpu->mmio.disable_warn_untrack = true;
			}
		}
	}

	if (ret)
		goto err;

	intel_gvt_mmio_set_accessed(gvt, offset);
	mutex_unlock(&gvt->lock);
	return 0;
err:
	gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
			offset, bytes);
	mutex_unlock(&gvt->lock);
	return ret;
}

/**
 * intel_vgpu_emulate_mmio_write - emulate MMIO write
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: write data buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
		void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_mmio_info *mmio;
	unsigned int offset = 0;
	u32 old_vreg = 0, old_sreg = 0;
	int ret = -EINVAL;

	if (vgpu->failsafe) {
		failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, false);
		return 0;
	}

	mutex_lock(&gvt->lock);
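	/*
	 * A write that hits a write-protected guest page is dispatched to
	 * that page's own handler (typically the PPGTT shadowing code)
	 * instead of the MMIO handlers.
	 */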
	if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
		struct intel_vgpu_guest_page *gp;

		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
		if (gp) {
			ret = gp->handler(gp, pa, p_data, bytes);
			if (ret) {
				gvt_err("guest page write error %d, "
					"gfn 0x%lx, pa 0x%llx, "
					"var 0x%x, len %d\n",
					ret, gp->gfn, pa,
					*(u32 *)p_data, bytes);
			}
			mutex_unlock(&gvt->lock);
			return ret;
		}
	}

	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

	if (WARN_ON(bytes > 8))
		goto err;

	if (reg_is_gtt(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
			goto err;
		if (WARN_ON(bytes != 4 && bytes != 8))
			goto err;
		if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
			goto err;

		ret = intel_vgpu_emulate_gtt_mmio_write(vgpu, offset,
				p_data, bytes);
		if (ret)
			goto err;
		mutex_unlock(&gvt->lock);
		return ret;
	}

	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
		ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
		mutex_unlock(&gvt->lock);
		return ret;
	}

	mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
	if (!mmio && !vgpu->mmio.disable_warn_untrack)
		gvt_dbg_mmio("vgpu%d: write untracked MMIO %x len %d val %x\n",
				vgpu->id, offset, bytes, *(u32 *)p_data);

	if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, bytes)))
			goto err;
	}

	if (mmio) {
		u64 ro_mask = mmio->ro_mask;

		if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
			if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
				goto err;
			if (WARN_ON(mmio->offset != offset))
				goto err;
		}
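		/*
		 * The handler below may rewrite a mode-control register
		 * wholesale; save the old virtual/shadow values so the
		 * masked merge at the end only changes the bits the guest
		 * actually unmasked.
		 */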
		if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
			old_vreg = vgpu_vreg(vgpu, offset);
			old_sreg = vgpu_sreg(vgpu, offset);
		}

		if (!ro_mask) {
			ret = mmio->write(vgpu, offset, p_data, bytes);
		} else {
			/* Protect RO bits like HW */
			u64 data = 0;

			/* all register bits are RO. */
			if (ro_mask == ~(u64)0) {
				gvt_vgpu_err("try to write RO reg %x\n",
						offset);
				ret = 0;
				goto out;
			}
			/* keep the RO bits in the virtual register */
			memcpy(&data, p_data, bytes);
			data &= ~mmio->ro_mask;
			data |= vgpu_vreg(vgpu, offset) & mmio->ro_mask;
			ret = mmio->write(vgpu, offset, &data, bytes);
		}

		/* higher 16 bits of mode ctl regs are mask bits for change */
		if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
			u32 mask = vgpu_vreg(vgpu, offset) >> 16;

			vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
				| (vgpu_vreg(vgpu, offset) & mask);
			vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
				| (vgpu_sreg(vgpu, offset) & mask);
		}
	} else
		ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
				bytes);
	if (ret)
		goto err;
out:
	intel_gvt_mmio_set_accessed(gvt, offset);
	mutex_unlock(&gvt->lock);
	return 0;
err:
	gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
			bytes);
	mutex_unlock(&gvt->lock);
	return ret;
}

/**
 * intel_vgpu_reset_mmio - reset virtual MMIO space
 * @vgpu: a vGPU
 *
 */
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;

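	/*
	 * Reload both the virtual and the shadow register files from the
	 * saved firmware MMIO snapshot, then override the few registers
	 * that need a fixed initial value.
	 */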
	memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
	memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);

	vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;

	/* set bits 0:2 (Core C-State) to C0 */
	vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;

	vgpu->mmio.disable_warn_untrack = false;
}

/**
 * intel_vgpu_init_mmio - init MMIO space
 * @vgpu: a vGPU
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;

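	/*
	 * vreg and sreg are carved out of a single vzalloc'd block (hence
	 * mmio_size * 2): sreg starts immediately after vreg.
	 */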
	vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
	if (!vgpu->mmio.vreg)
		return -ENOMEM;

	vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;

	intel_vgpu_reset_mmio(vgpu);

	return 0;
}

/**
 * intel_vgpu_clean_mmio - clean MMIO space
 * @vgpu: a vGPU
 *
 */
void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu)
{
	vfree(vgpu->mmio.vreg);
	vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
}