gtt.c

  1. /*
  2. * GTT virtualization
  3. *
  4. * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a
  7. * copy of this software and associated documentation files (the "Software"),
  8. * to deal in the Software without restriction, including without limitation
  9. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10. * and/or sell copies of the Software, and to permit persons to whom the
  11. * Software is furnished to do so, subject to the following conditions:
  12. *
  13. * The above copyright notice and this permission notice (including the next
  14. * paragraph) shall be included in all copies or substantial portions of the
  15. * Software.
  16. *
  17. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  18. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  19. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  20. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  21. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  22. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  23. * SOFTWARE.
  24. *
  25. * Authors:
  26. * Zhi Wang <zhi.a.wang@intel.com>
  27. * Zhenyu Wang <zhenyuw@linux.intel.com>
  28. * Xiao Zheng <xiao.zheng@intel.com>
  29. *
  30. * Contributors:
  31. * Min He <min.he@intel.com>
  32. * Bing Niu <bing.niu@intel.com>
  33. *
  34. */
  35. #include "i915_drv.h"
  36. #include "gvt.h"
  37. #include "i915_pvinfo.h"
  38. #include "trace.h"
  39. static bool enable_out_of_sync = false;
  40. static int preallocated_oos_pages = 8192;
  41. /*
  42. * validate a gm address and related range size;
  43. * the g2h helpers below translate a validated address to a host gm address
  44. */
  45. bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
  46. {
  47. if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
  48. && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
  49. gvt_err("vgpu%d: invalid range gmadr 0x%llx size 0x%x\n",
  50. vgpu->id, addr, size);
  51. return false;
  52. }
  53. return true;
  54. }
  55. /* translate a guest gmadr to host gmadr */
  56. int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
  57. {
  58. if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
  59. "invalid guest gmadr %llx\n", g_addr))
  60. return -EACCES;
  61. if (vgpu_gmadr_is_aperture(vgpu, g_addr))
  62. *h_addr = vgpu_aperture_gmadr_base(vgpu)
  63. + (g_addr - vgpu_aperture_offset(vgpu));
  64. else
  65. *h_addr = vgpu_hidden_gmadr_base(vgpu)
  66. + (g_addr - vgpu_hidden_offset(vgpu));
  67. return 0;
  68. }
  69. /* translate a host gmadr to guest gmadr */
  70. int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
  71. {
  72. if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
  73. "invalid host gmadr %llx\n", h_addr))
  74. return -EACCES;
  75. if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
  76. *g_addr = vgpu_aperture_gmadr_base(vgpu)
  77. + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
  78. else
  79. *g_addr = vgpu_hidden_gmadr_base(vgpu)
  80. + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
  81. return 0;
  82. }
  83. int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
  84. unsigned long *h_index)
  85. {
  86. u64 h_addr;
  87. int ret;
  88. ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << GTT_PAGE_SHIFT,
  89. &h_addr);
  90. if (ret)
  91. return ret;
  92. *h_index = h_addr >> GTT_PAGE_SHIFT;
  93. return 0;
  94. }
  95. int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
  96. unsigned long *g_index)
  97. {
  98. u64 g_addr;
  99. int ret;
  100. ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << GTT_PAGE_SHIFT,
  101. &g_addr);
  102. if (ret)
  103. return ret;
  104. *g_index = g_addr >> GTT_PAGE_SHIFT;
  105. return 0;
  106. }
  107. #define gtt_type_is_entry(type) \
  108. (type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
  109. && type != GTT_TYPE_PPGTT_PTE_ENTRY \
  110. && type != GTT_TYPE_PPGTT_ROOT_ENTRY)
  111. #define gtt_type_is_pt(type) \
  112. (type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)
  113. #define gtt_type_is_pte_pt(type) \
  114. (type == GTT_TYPE_PPGTT_PTE_PT)
  115. #define gtt_type_is_root_pointer(type) \
  116. (gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)
  117. #define gtt_init_entry(e, t, p, v) do { \
  118. (e)->type = t; \
  119. (e)->pdev = p; \
  120. memcpy(&(e)->val64, &v, sizeof(v)); \
  121. } while (0)
  122. /*
  123. * Mappings between GTT_TYPE* enumerations.
  124. * The following information can be looked up for a given type:
  125. * - the type of the next-level page table
  126. * - the type of the entries inside this level of page table
  127. * - the type of the entry when its PSE bit is set
  128. *
  129. * If the given type does not carry that kind of information,
  130. * GTT_TYPE_INVALID is returned. For example, an L4 root entry has
  131. * no PSE bit, so asking for its PSE type yields GTT_TYPE_INVALID,
  132. * and a PTE page table has no next-level page table, so asking for
  133. * its next-level type yields GTT_TYPE_INVALID as well. This is
  134. * useful when traversing a page table, since the walk can simply
  135. * stop at an invalid type.
  136. */
  137. struct gtt_type_table_entry {
  138. int entry_type;
  139. int next_pt_type;
  140. int pse_entry_type;
  141. };
  142. #define GTT_TYPE_TABLE_ENTRY(type, e_type, npt_type, pse_type) \
  143. [type] = { \
  144. .entry_type = e_type, \
  145. .next_pt_type = npt_type, \
  146. .pse_entry_type = pse_type, \
  147. }
  148. static struct gtt_type_table_entry gtt_type_table[] = {
  149. GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
  150. GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
  151. GTT_TYPE_PPGTT_PML4_PT,
  152. GTT_TYPE_INVALID),
  153. GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
  154. GTT_TYPE_PPGTT_PML4_ENTRY,
  155. GTT_TYPE_PPGTT_PDP_PT,
  156. GTT_TYPE_INVALID),
  157. GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
  158. GTT_TYPE_PPGTT_PML4_ENTRY,
  159. GTT_TYPE_PPGTT_PDP_PT,
  160. GTT_TYPE_INVALID),
  161. GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
  162. GTT_TYPE_PPGTT_PDP_ENTRY,
  163. GTT_TYPE_PPGTT_PDE_PT,
  164. GTT_TYPE_PPGTT_PTE_1G_ENTRY),
  165. GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
  166. GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
  167. GTT_TYPE_PPGTT_PDE_PT,
  168. GTT_TYPE_PPGTT_PTE_1G_ENTRY),
  169. GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
  170. GTT_TYPE_PPGTT_PDP_ENTRY,
  171. GTT_TYPE_PPGTT_PDE_PT,
  172. GTT_TYPE_PPGTT_PTE_1G_ENTRY),
  173. GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
  174. GTT_TYPE_PPGTT_PDE_ENTRY,
  175. GTT_TYPE_PPGTT_PTE_PT,
  176. GTT_TYPE_PPGTT_PTE_2M_ENTRY),
  177. GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
  178. GTT_TYPE_PPGTT_PDE_ENTRY,
  179. GTT_TYPE_PPGTT_PTE_PT,
  180. GTT_TYPE_PPGTT_PTE_2M_ENTRY),
  181. GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
  182. GTT_TYPE_PPGTT_PTE_4K_ENTRY,
  183. GTT_TYPE_INVALID,
  184. GTT_TYPE_INVALID),
  185. GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
  186. GTT_TYPE_PPGTT_PTE_4K_ENTRY,
  187. GTT_TYPE_INVALID,
  188. GTT_TYPE_INVALID),
  189. GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
  190. GTT_TYPE_PPGTT_PDE_ENTRY,
  191. GTT_TYPE_INVALID,
  192. GTT_TYPE_PPGTT_PTE_2M_ENTRY),
  193. GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
  194. GTT_TYPE_PPGTT_PDP_ENTRY,
  195. GTT_TYPE_INVALID,
  196. GTT_TYPE_PPGTT_PTE_1G_ENTRY),
  197. GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
  198. GTT_TYPE_GGTT_PTE,
  199. GTT_TYPE_INVALID,
  200. GTT_TYPE_INVALID),
  201. };
  202. static inline int get_next_pt_type(int type)
  203. {
  204. return gtt_type_table[type].next_pt_type;
  205. }
  206. static inline int get_entry_type(int type)
  207. {
  208. return gtt_type_table[type].entry_type;
  209. }
  210. static inline int get_pse_type(int type)
  211. {
  212. return gtt_type_table[type].pse_entry_type;
  213. }
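/*
 * Illustration of how gtt_type_table above is used when walking a
 * 4-level PPGTT: a PML4 page table (GTT_TYPE_PPGTT_PML4_PT) holds
 * entries of type GTT_TYPE_PPGTT_PML4_ENTRY, whose next-level table
 * type is GTT_TYPE_PPGTT_PDP_PT, and so on down to
 * GTT_TYPE_PPGTT_PTE_PT. A PDE entry with the PSE bit set resolves to
 * GTT_TYPE_PPGTT_PTE_2M_ENTRY via get_pse_type(), while types without
 * such a mapping resolve to GTT_TYPE_INVALID.
 */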
  214. static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
  215. {
  216. void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
  217. u64 pte;
  218. #ifdef readq
  219. pte = readq(addr);
  220. #else
  221. pte = ioread32(addr);
  222. pte |= (u64)ioread32(addr + 4) << 32;
  223. #endif
  224. return pte;
  225. }
  226. static void write_pte64(struct drm_i915_private *dev_priv,
  227. unsigned long index, u64 pte)
  228. {
  229. void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
  230. #ifdef writeq
  231. writeq(pte, addr);
  232. #else
  233. iowrite32((u32)pte, addr);
  234. iowrite32(pte >> 32, addr + 4);
  235. #endif
  236. I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
  237. POSTING_READ(GFX_FLSH_CNTL_GEN6);
  238. }
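/*
 * Note on the two helpers above: GGTT entries live in the GSM and are
 * accessed through MMIO. On builds without readq/writeq, the 64-bit
 * entry is split into two 32-bit accesses, and each write is followed
 * by a write to GFX_FLSH_CNTL_GEN6 so the hardware observes the
 * updated PTE.
 */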
  239. static inline struct intel_gvt_gtt_entry *gtt_get_entry64(void *pt,
  240. struct intel_gvt_gtt_entry *e,
  241. unsigned long index, bool hypervisor_access, unsigned long gpa,
  242. struct intel_vgpu *vgpu)
  243. {
  244. const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
  245. int ret;
  246. if (WARN_ON(info->gtt_entry_size != 8))
  247. return e;
  248. if (hypervisor_access) {
  249. ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
  250. (index << info->gtt_entry_size_shift),
  251. &e->val64, 8);
  252. WARN_ON(ret);
  253. } else if (!pt) {
  254. e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
  255. } else {
  256. e->val64 = *((u64 *)pt + index);
  257. }
  258. return e;
  259. }
  260. static inline struct intel_gvt_gtt_entry *gtt_set_entry64(void *pt,
  261. struct intel_gvt_gtt_entry *e,
  262. unsigned long index, bool hypervisor_access, unsigned long gpa,
  263. struct intel_vgpu *vgpu)
  264. {
  265. const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
  266. int ret;
  267. if (WARN_ON(info->gtt_entry_size != 8))
  268. return e;
  269. if (hypervisor_access) {
  270. ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
  271. (index << info->gtt_entry_size_shift),
  272. &e->val64, 8);
  273. WARN_ON(ret);
  274. } else if (!pt) {
  275. write_pte64(vgpu->gvt->dev_priv, index, e->val64);
  276. } else {
  277. *((u64 *)pt + index) = e->val64;
  278. }
  279. return e;
  280. }
  281. #define GTT_HAW 46
  282. #define ADDR_1G_MASK (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30)
  283. #define ADDR_2M_MASK (((1UL << (GTT_HAW - 21 + 1)) - 1) << 21)
  284. #define ADDR_4K_MASK (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12)
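/*
 * The masks above select the address field of a gen8 PTE for each
 * supported page size within the GTT_HAW-bit address width: bits 30
 * and up for 1GB entries, 21 and up for 2MB entries, 12 and up for
 * 4KB entries. get_pfn()/set_pfn() below always shift by 12, i.e. the
 * pfn is expressed in 4KB units regardless of the page size.
 */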
  285. static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
  286. {
  287. unsigned long pfn;
  288. if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
  289. pfn = (e->val64 & ADDR_1G_MASK) >> 12;
  290. else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
  291. pfn = (e->val64 & ADDR_2M_MASK) >> 12;
  292. else
  293. pfn = (e->val64 & ADDR_4K_MASK) >> 12;
  294. return pfn;
  295. }
  296. static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
  297. {
  298. if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
  299. e->val64 &= ~ADDR_1G_MASK;
  300. pfn &= (ADDR_1G_MASK >> 12);
  301. } else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
  302. e->val64 &= ~ADDR_2M_MASK;
  303. pfn &= (ADDR_2M_MASK >> 12);
  304. } else {
  305. e->val64 &= ~ADDR_4K_MASK;
  306. pfn &= (ADDR_4K_MASK >> 12);
  307. }
  308. e->val64 |= (pfn << 12);
  309. }
  310. static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
  311. {
  312. /* Entry doesn't have PSE bit. */
  313. if (get_pse_type(e->type) == GTT_TYPE_INVALID)
  314. return false;
  315. e->type = get_entry_type(e->type);
  316. if (!(e->val64 & (1 << 7)))
  317. return false;
  318. e->type = get_pse_type(e->type);
  319. return true;
  320. }
  321. static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
  322. {
  323. /*
  324. * i915 writes the PDP root pointer registers without setting the
  325. * present bit, which still works, so root pointer entries need to
  326. * be treated specially here.
  327. */
  328. if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
  329. || e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
  330. return (e->val64 != 0);
  331. else
  332. return (e->val64 & (1 << 0));
  333. }
  334. static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
  335. {
  336. e->val64 &= ~(1 << 0);
  337. }
  338. /*
  339. * Per-platform GMA routines.
  340. */
  341. static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
  342. {
  343. unsigned long x = (gma >> GTT_PAGE_SHIFT);
  344. trace_gma_index(__func__, gma, x);
  345. return x;
  346. }
  347. #define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
  348. static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
  349. { \
  350. unsigned long x = (exp); \
  351. trace_gma_index(__func__, gma, x); \
  352. return x; \
  353. }
  354. DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
  355. DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
  356. DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
  357. DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
  358. DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));
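/*
 * The index helpers above decompose a graphics memory address (GMA)
 * into gen8 page-table indices: the pte index comes from bits 20:12,
 * the pde index from bits 29:21, the l4 pdp index from bits 38:30
 * (only bits 31:30 for the 3-level l3 case) and the pml4 index from
 * bits 47:39. Each full index is 9 bits wide, matching the 512 entries
 * of a 4KB page table.
 */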
  359. static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
  360. .get_entry = gtt_get_entry64,
  361. .set_entry = gtt_set_entry64,
  362. .clear_present = gtt_entry_clear_present,
  363. .test_present = gen8_gtt_test_present,
  364. .test_pse = gen8_gtt_test_pse,
  365. .get_pfn = gen8_gtt_get_pfn,
  366. .set_pfn = gen8_gtt_set_pfn,
  367. };
  368. static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
  369. .gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
  370. .gma_to_pte_index = gen8_gma_to_pte_index,
  371. .gma_to_pde_index = gen8_gma_to_pde_index,
  372. .gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
  373. .gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
  374. .gma_to_pml4_index = gen8_gma_to_pml4_index,
  375. };
  376. static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
  377. struct intel_gvt_gtt_entry *m)
  378. {
  379. struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
  380. unsigned long gfn, mfn;
  381. *m = *p;
  382. if (!ops->test_present(p))
  383. return 0;
  384. gfn = ops->get_pfn(p);
  385. mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
  386. if (mfn == INTEL_GVT_INVALID_ADDR) {
  387. gvt_err("fail to translate gfn: 0x%lx\n", gfn);
  388. return -ENXIO;
  389. }
  390. ops->set_pfn(m, mfn);
  391. return 0;
  392. }
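/*
 * gtt_entry_p2m() above converts a guest ("physical") entry into a
 * machine entry: non-present entries are copied through unchanged,
 * while present entries have their guest frame number translated to a
 * machine frame number through the hypervisor. The resulting machine
 * entry is what callers install into a shadow page table or the host
 * GGTT.
 */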
  393. /*
  394. * MM helpers.
  395. */
  396. struct intel_gvt_gtt_entry *intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm,
  397. void *page_table, struct intel_gvt_gtt_entry *e,
  398. unsigned long index)
  399. {
  400. struct intel_gvt *gvt = mm->vgpu->gvt;
  401. struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
  402. e->type = mm->page_table_entry_type;
  403. ops->get_entry(page_table, e, index, false, 0, mm->vgpu);
  404. ops->test_pse(e);
  405. return e;
  406. }
  407. struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
  408. void *page_table, struct intel_gvt_gtt_entry *e,
  409. unsigned long index)
  410. {
  411. struct intel_gvt *gvt = mm->vgpu->gvt;
  412. struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
  413. return ops->set_entry(page_table, e, index, false, 0, mm->vgpu);
  414. }
  415. /*
  416. * PPGTT shadow page table helpers.
  417. */
  418. static inline struct intel_gvt_gtt_entry *ppgtt_spt_get_entry(
  419. struct intel_vgpu_ppgtt_spt *spt,
  420. void *page_table, int type,
  421. struct intel_gvt_gtt_entry *e, unsigned long index,
  422. bool guest)
  423. {
  424. struct intel_gvt *gvt = spt->vgpu->gvt;
  425. struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
  426. e->type = get_entry_type(type);
  427. if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
  428. return e;
  429. ops->get_entry(page_table, e, index, guest,
  430. spt->guest_page.gfn << GTT_PAGE_SHIFT,
  431. spt->vgpu);
  432. ops->test_pse(e);
  433. return e;
  434. }
  435. static inline struct intel_gvt_gtt_entry *ppgtt_spt_set_entry(
  436. struct intel_vgpu_ppgtt_spt *spt,
  437. void *page_table, int type,
  438. struct intel_gvt_gtt_entry *e, unsigned long index,
  439. bool guest)
  440. {
  441. struct intel_gvt *gvt = spt->vgpu->gvt;
  442. struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
  443. if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
  444. return e;
  445. return ops->set_entry(page_table, e, index, guest,
  446. spt->guest_page.gfn << GTT_PAGE_SHIFT,
  447. spt->vgpu);
  448. }
  449. #define ppgtt_get_guest_entry(spt, e, index) \
  450. ppgtt_spt_get_entry(spt, NULL, \
  451. spt->guest_page_type, e, index, true)
  452. #define ppgtt_set_guest_entry(spt, e, index) \
  453. ppgtt_spt_set_entry(spt, NULL, \
  454. spt->guest_page_type, e, index, true)
  455. #define ppgtt_get_shadow_entry(spt, e, index) \
  456. ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
  457. spt->shadow_page.type, e, index, false)
  458. #define ppgtt_set_shadow_entry(spt, e, index) \
  459. ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
  460. spt->shadow_page.type, e, index, false)
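/*
 * The four accessors above distinguish the two views of a PPGTT page:
 * ppgtt_get/set_guest_entry() go through the hypervisor and operate on
 * the guest page identified by guest_page.gfn, while
 * ppgtt_get/set_shadow_entry() operate directly on the host-side
 * shadow page via shadow_page.vaddr.
 */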
  461. /**
  462. * intel_vgpu_init_guest_page - init a guest page data structure
  463. * @vgpu: a vGPU
  464. * @p: a guest page data structure
  465. * @gfn: guest memory page frame number
  466. * @handler: function to be called when the target guest memory page has
  467. * been modified.
  468. *
  469. * This function is called when a user wants to track a guest memory page.
  470. *
  471. * Returns:
  472. * Zero on success, negative error code if failed.
  473. */
  474. int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu,
  475. struct intel_vgpu_guest_page *p,
  476. unsigned long gfn,
  477. int (*handler)(void *, u64, void *, int),
  478. void *data)
  479. {
  480. INIT_HLIST_NODE(&p->node);
  481. p->writeprotection = false;
  482. p->gfn = gfn;
  483. p->handler = handler;
  484. p->data = data;
  485. p->oos_page = NULL;
  486. p->write_cnt = 0;
  487. hash_add(vgpu->gtt.guest_page_hash_table, &p->node, p->gfn);
  488. return 0;
  489. }
  490. static int detach_oos_page(struct intel_vgpu *vgpu,
  491. struct intel_vgpu_oos_page *oos_page);
  492. /**
  493. * intel_vgpu_clean_guest_page - release the resource owned by guest page data
  494. * structure
  495. * @vgpu: a vGPU
  496. * @p: a tracked guest page
  497. *
  498. * This function is called when a user wants to stop tracking a guest memory
  499. * page.
  500. */
  501. void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu,
  502. struct intel_vgpu_guest_page *p)
  503. {
  504. if (!hlist_unhashed(&p->node))
  505. hash_del(&p->node);
  506. if (p->oos_page)
  507. detach_oos_page(vgpu, p->oos_page);
  508. if (p->writeprotection)
  509. intel_gvt_hypervisor_unset_wp_page(vgpu, p);
  510. }
  511. /**
  512. * intel_vgpu_find_guest_page - find a guest page data structure by GFN.
  513. * @vgpu: a vGPU
  514. * @gfn: guest memory page frame number
  515. *
  516. * This function is called when emulation logic wants to know if a trapped GFN
  517. * is a tracked guest page.
  518. *
  519. * Returns:
  520. * Pointer to guest page data structure, NULL if failed.
  521. */
  522. struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
  523. struct intel_vgpu *vgpu, unsigned long gfn)
  524. {
  525. struct intel_vgpu_guest_page *p;
  526. hash_for_each_possible(vgpu->gtt.guest_page_hash_table,
  527. p, node, gfn) {
  528. if (p->gfn == gfn)
  529. return p;
  530. }
  531. return NULL;
  532. }
  533. static inline int init_shadow_page(struct intel_vgpu *vgpu,
  534. struct intel_vgpu_shadow_page *p, int type)
  535. {
  536. p->vaddr = page_address(p->page);
  537. p->type = type;
  538. INIT_HLIST_NODE(&p->node);
  539. p->mfn = intel_gvt_hypervisor_virt_to_mfn(p->vaddr);
  540. if (p->mfn == INTEL_GVT_INVALID_ADDR)
  541. return -EFAULT;
  542. hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
  543. return 0;
  544. }
  545. static inline void clean_shadow_page(struct intel_vgpu_shadow_page *p)
  546. {
  547. if (!hlist_unhashed(&p->node))
  548. hash_del(&p->node);
  549. }
  550. static inline struct intel_vgpu_shadow_page *find_shadow_page(
  551. struct intel_vgpu *vgpu, unsigned long mfn)
  552. {
  553. struct intel_vgpu_shadow_page *p;
  554. hash_for_each_possible(vgpu->gtt.shadow_page_hash_table,
  555. p, node, mfn) {
  556. if (p->mfn == mfn)
  557. return p;
  558. }
  559. return NULL;
  560. }
  561. #define guest_page_to_ppgtt_spt(ptr) \
  562. container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page)
  563. #define shadow_page_to_ppgtt_spt(ptr) \
  564. container_of(ptr, struct intel_vgpu_ppgtt_spt, shadow_page)
  565. static void *alloc_spt(gfp_t gfp_mask)
  566. {
  567. struct intel_vgpu_ppgtt_spt *spt;
  568. spt = kzalloc(sizeof(*spt), gfp_mask);
  569. if (!spt)
  570. return NULL;
  571. spt->shadow_page.page = alloc_page(gfp_mask);
  572. if (!spt->shadow_page.page) {
  573. kfree(spt);
  574. return NULL;
  575. }
  576. return spt;
  577. }
  578. static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
  579. {
  580. __free_page(spt->shadow_page.page);
  581. kfree(spt);
  582. }
  583. static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
  584. {
  585. trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);
  586. clean_shadow_page(&spt->shadow_page);
  587. intel_vgpu_clean_guest_page(spt->vgpu, &spt->guest_page);
  588. list_del_init(&spt->post_shadow_list);
  589. free_spt(spt);
  590. }
  591. static void ppgtt_free_all_shadow_page(struct intel_vgpu *vgpu)
  592. {
  593. struct hlist_node *n;
  594. struct intel_vgpu_shadow_page *sp;
  595. int i;
  596. hash_for_each_safe(vgpu->gtt.shadow_page_hash_table, i, n, sp, node)
  597. ppgtt_free_shadow_page(shadow_page_to_ppgtt_spt(sp));
  598. }
  599. static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
  600. u64 pa, void *p_data, int bytes);
  601. static int ppgtt_write_protection_handler(void *gp, u64 pa,
  602. void *p_data, int bytes)
  603. {
  604. struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
  605. int ret;
  606. if (bytes != 4 && bytes != 8)
  607. return -EINVAL;
  608. if (!gpt->writeprotection)
  609. return -EINVAL;
  610. ret = ppgtt_handle_guest_write_page_table_bytes(gp,
  611. pa, p_data, bytes);
  612. if (ret)
  613. return ret;
  614. return ret;
  615. }
  616. static int reclaim_one_mm(struct intel_gvt *gvt);
  617. static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page(
  618. struct intel_vgpu *vgpu, int type, unsigned long gfn)
  619. {
  620. struct intel_vgpu_ppgtt_spt *spt = NULL;
  621. int ret;
  622. retry:
  623. spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
  624. if (!spt) {
  625. if (reclaim_one_mm(vgpu->gvt))
  626. goto retry;
  627. gvt_err("fail to allocate ppgtt shadow page\n");
  628. return ERR_PTR(-ENOMEM);
  629. }
  630. spt->vgpu = vgpu;
  631. spt->guest_page_type = type;
  632. atomic_set(&spt->refcount, 1);
  633. INIT_LIST_HEAD(&spt->post_shadow_list);
  634. /*
  635. * TODO: the guest page type may differ from the shadow page type
  636. * once PSE pages are supported in the future.
  637. */
  638. ret = init_shadow_page(vgpu, &spt->shadow_page, type);
  639. if (ret) {
  640. gvt_err("fail to initialize shadow page for spt\n");
  641. goto err;
  642. }
  643. ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
  644. gfn, ppgtt_write_protection_handler, NULL);
  645. if (ret) {
  646. gvt_err("fail to initialize guest page for spt\n");
  647. goto err;
  648. }
  649. trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
  650. return spt;
  651. err:
  652. ppgtt_free_shadow_page(spt);
  653. return ERR_PTR(ret);
  654. }
  655. static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
  656. struct intel_vgpu *vgpu, unsigned long mfn)
  657. {
  658. struct intel_vgpu_shadow_page *p = find_shadow_page(vgpu, mfn);
  659. if (p)
  660. return shadow_page_to_ppgtt_spt(p);
  661. gvt_err("vgpu%d: fail to find ppgtt shadow page: 0x%lx\n",
  662. vgpu->id, mfn);
  663. return NULL;
  664. }
  665. #define pt_entry_size_shift(spt) \
  666. ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
  667. #define pt_entries(spt) \
  668. (GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
  669. #define for_each_present_guest_entry(spt, e, i) \
  670. for (i = 0; i < pt_entries(spt); i++) \
  671. if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
  672. ppgtt_get_guest_entry(spt, e, i)))
  673. #define for_each_present_shadow_entry(spt, e, i) \
  674. for (i = 0; i < pt_entries(spt); i++) \
  675. if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
  676. ppgtt_get_shadow_entry(spt, e, i)))
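/*
 * The two iterators above walk every slot of a page table page
 * (GTT_PAGE_SIZE divided by the entry size) and execute the loop body
 * only for entries whose present-bit test passes.
 */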
  677. static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
  678. {
  679. int v = atomic_read(&spt->refcount);
  680. trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
  681. atomic_inc(&spt->refcount);
  682. }
  683. static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);
  684. static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
  685. struct intel_gvt_gtt_entry *e)
  686. {
  687. struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
  688. struct intel_vgpu_ppgtt_spt *s;
  689. intel_gvt_gtt_type_t cur_pt_type;
  690. if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(e->type))))
  691. return -EINVAL;
  692. if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
  693. && e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
  694. cur_pt_type = get_next_pt_type(e->type) + 1;
  695. if (ops->get_pfn(e) ==
  696. vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
  697. return 0;
  698. }
  699. s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
  700. if (!s) {
  701. gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n",
  702. vgpu->id, ops->get_pfn(e));
  703. return -ENXIO;
  704. }
  705. return ppgtt_invalidate_shadow_page(s);
  706. }
  707. static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
  708. {
  709. struct intel_gvt_gtt_entry e;
  710. unsigned long index;
  711. int ret;
  712. int v = atomic_read(&spt->refcount);
  713. trace_spt_change(spt->vgpu->id, "die", spt,
  714. spt->guest_page.gfn, spt->shadow_page.type);
  715. trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
  716. if (atomic_dec_return(&spt->refcount) > 0)
  717. return 0;
  718. if (gtt_type_is_pte_pt(spt->shadow_page.type))
  719. goto release;
  720. for_each_present_shadow_entry(spt, &e, index) {
  721. if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
  722. gvt_err("GVT doesn't support pse bit for now\n");
  723. return -EINVAL;
  724. }
  725. ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
  726. spt->vgpu, &e);
  727. if (ret)
  728. goto fail;
  729. }
  730. release:
  731. trace_spt_change(spt->vgpu->id, "release", spt,
  732. spt->guest_page.gfn, spt->shadow_page.type);
  733. ppgtt_free_shadow_page(spt);
  734. return 0;
  735. fail:
  736. gvt_err("vgpu%d: fail: shadow page %p shadow entry 0x%llx type %d\n",
  737. spt->vgpu->id, spt, e.val64, e.type);
  738. return ret;
  739. }
  740. static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);
  741. static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
  742. struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
  743. {
  744. struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
  745. struct intel_vgpu_ppgtt_spt *s = NULL;
  746. struct intel_vgpu_guest_page *g;
  747. int ret;
  748. if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(we->type)))) {
  749. ret = -EINVAL;
  750. goto fail;
  751. }
  752. g = intel_vgpu_find_guest_page(vgpu, ops->get_pfn(we));
  753. if (g) {
  754. s = guest_page_to_ppgtt_spt(g);
  755. ppgtt_get_shadow_page(s);
  756. } else {
  757. int type = get_next_pt_type(we->type);
  758. s = ppgtt_alloc_shadow_page(vgpu, type, ops->get_pfn(we));
  759. if (IS_ERR(s)) {
  760. ret = PTR_ERR(s);
  761. goto fail;
  762. }
  763. ret = intel_gvt_hypervisor_set_wp_page(vgpu, &s->guest_page);
  764. if (ret)
  765. goto fail;
  766. ret = ppgtt_populate_shadow_page(s);
  767. if (ret)
  768. goto fail;
  769. trace_spt_change(vgpu->id, "new", s, s->guest_page.gfn,
  770. s->shadow_page.type);
  771. }
  772. return s;
  773. fail:
  774. gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
  775. vgpu->id, s, we->val64, we->type);
  776. return ERR_PTR(ret);
  777. }
  778. static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
  779. struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
  780. {
  781. struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;
  782. se->type = ge->type;
  783. se->val64 = ge->val64;
  784. ops->set_pfn(se, s->shadow_page.mfn);
  785. }
  786. static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
  787. {
  788. struct intel_vgpu *vgpu = spt->vgpu;
  789. struct intel_vgpu_ppgtt_spt *s;
  790. struct intel_gvt_gtt_entry se, ge;
  791. unsigned long i;
  792. int ret;
  793. trace_spt_change(spt->vgpu->id, "born", spt,
  794. spt->guest_page.gfn, spt->shadow_page.type);
  795. if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
  796. for_each_present_guest_entry(spt, &ge, i) {
  797. ret = gtt_entry_p2m(vgpu, &ge, &se);
  798. if (ret)
  799. goto fail;
  800. ppgtt_set_shadow_entry(spt, &se, i);
  801. }
  802. return 0;
  803. }
  804. for_each_present_guest_entry(spt, &ge, i) {
  805. if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
  806. gvt_err("GVT doesn't support pse bit now\n");
  807. ret = -EINVAL;
  808. goto fail;
  809. }
  810. s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
  811. if (IS_ERR(s)) {
  812. ret = PTR_ERR(s);
  813. goto fail;
  814. }
  815. ppgtt_get_shadow_entry(spt, &se, i);
  816. ppgtt_generate_shadow_entry(&se, s, &ge);
  817. ppgtt_set_shadow_entry(spt, &se, i);
  818. }
  819. return 0;
  820. fail:
  821. gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
  822. vgpu->id, spt, ge.val64, ge.type);
  823. return ret;
  824. }
  825. static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
  826. unsigned long index)
  827. {
  828. struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
  829. struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
  830. struct intel_vgpu *vgpu = spt->vgpu;
  831. struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
  832. struct intel_gvt_gtt_entry e;
  833. int ret;
  834. ppgtt_get_shadow_entry(spt, &e, index);
  835. trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, e.val64,
  836. index);
  837. if (!ops->test_present(&e))
  838. return 0;
  839. if (ops->get_pfn(&e) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
  840. return 0;
  841. if (gtt_type_is_pt(get_next_pt_type(e.type))) {
  842. struct intel_vgpu_ppgtt_spt *s =
  843. ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e));
  844. if (!s) {
  845. gvt_err("fail to find guest page\n");
  846. ret = -ENXIO;
  847. goto fail;
  848. }
  849. ret = ppgtt_invalidate_shadow_page(s);
  850. if (ret)
  851. goto fail;
  852. }
  853. ops->set_pfn(&e, vgpu->gtt.scratch_pt[sp->type].page_mfn);
  854. ppgtt_set_shadow_entry(spt, &e, index);
  855. return 0;
  856. fail:
  857. gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
  858. vgpu->id, spt, e.val64, e.type);
  859. return ret;
  860. }
  861. static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
  862. struct intel_gvt_gtt_entry *we, unsigned long index)
  863. {
  864. struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
  865. struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
  866. struct intel_vgpu *vgpu = spt->vgpu;
  867. struct intel_gvt_gtt_entry m;
  868. struct intel_vgpu_ppgtt_spt *s;
  869. int ret;
  870. trace_gpt_change(spt->vgpu->id, "add", spt, sp->type,
  871. we->val64, index);
  872. if (gtt_type_is_pt(get_next_pt_type(we->type))) {
  873. s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, we);
  874. if (IS_ERR(s)) {
  875. ret = PTR_ERR(s);
  876. goto fail;
  877. }
  878. ppgtt_get_shadow_entry(spt, &m, index);
  879. ppgtt_generate_shadow_entry(&m, s, we);
  880. ppgtt_set_shadow_entry(spt, &m, index);
  881. } else {
  882. ret = gtt_entry_p2m(vgpu, we, &m);
  883. if (ret)
  884. goto fail;
  885. ppgtt_set_shadow_entry(spt, &m, index);
  886. }
  887. return 0;
  888. fail:
  889. gvt_err("vgpu%d: fail: spt %p guest entry 0x%llx type %d\n", vgpu->id,
  890. spt, we->val64, we->type);
  891. return ret;
  892. }
  893. static int sync_oos_page(struct intel_vgpu *vgpu,
  894. struct intel_vgpu_oos_page *oos_page)
  895. {
  896. const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
  897. struct intel_gvt *gvt = vgpu->gvt;
  898. struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
  899. struct intel_vgpu_ppgtt_spt *spt =
  900. guest_page_to_ppgtt_spt(oos_page->guest_page);
  901. struct intel_gvt_gtt_entry old, new, m;
  902. int index;
  903. int ret;
  904. trace_oos_change(vgpu->id, "sync", oos_page->id,
  905. oos_page->guest_page, spt->guest_page_type);
  906. old.type = new.type = get_entry_type(spt->guest_page_type);
  907. old.val64 = new.val64 = 0;
  908. for (index = 0; index < (GTT_PAGE_SIZE >> info->gtt_entry_size_shift);
  909. index++) {
  910. ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
  911. ops->get_entry(NULL, &new, index, true,
  912. oos_page->guest_page->gfn << PAGE_SHIFT, vgpu);
  913. if (old.val64 == new.val64
  914. && !test_and_clear_bit(index, spt->post_shadow_bitmap))
  915. continue;
  916. trace_oos_sync(vgpu->id, oos_page->id,
  917. oos_page->guest_page, spt->guest_page_type,
  918. new.val64, index);
  919. ret = gtt_entry_p2m(vgpu, &new, &m);
  920. if (ret)
  921. return ret;
  922. ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
  923. ppgtt_set_shadow_entry(spt, &m, index);
  924. }
  925. oos_page->guest_page->write_cnt = 0;
  926. list_del_init(&spt->post_shadow_list);
  927. return 0;
  928. }
  929. static int detach_oos_page(struct intel_vgpu *vgpu,
  930. struct intel_vgpu_oos_page *oos_page)
  931. {
  932. struct intel_gvt *gvt = vgpu->gvt;
  933. struct intel_vgpu_ppgtt_spt *spt =
  934. guest_page_to_ppgtt_spt(oos_page->guest_page);
  935. trace_oos_change(vgpu->id, "detach", oos_page->id,
  936. oos_page->guest_page, spt->guest_page_type);
  937. oos_page->guest_page->write_cnt = 0;
  938. oos_page->guest_page->oos_page = NULL;
  939. oos_page->guest_page = NULL;
  940. list_del_init(&oos_page->vm_list);
  941. list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);
  942. return 0;
  943. }
  944. static int attach_oos_page(struct intel_vgpu *vgpu,
  945. struct intel_vgpu_oos_page *oos_page,
  946. struct intel_vgpu_guest_page *gpt)
  947. {
  948. struct intel_gvt *gvt = vgpu->gvt;
  949. int ret;
  950. ret = intel_gvt_hypervisor_read_gpa(vgpu, gpt->gfn << GTT_PAGE_SHIFT,
  951. oos_page->mem, GTT_PAGE_SIZE);
  952. if (ret)
  953. return ret;
  954. oos_page->guest_page = gpt;
  955. gpt->oos_page = oos_page;
  956. list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);
  957. trace_oos_change(vgpu->id, "attach", gpt->oos_page->id,
  958. gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
  959. return 0;
  960. }
  961. static int ppgtt_set_guest_page_sync(struct intel_vgpu *vgpu,
  962. struct intel_vgpu_guest_page *gpt)
  963. {
  964. int ret;
  965. ret = intel_gvt_hypervisor_set_wp_page(vgpu, gpt);
  966. if (ret)
  967. return ret;
  968. trace_oos_change(vgpu->id, "set page sync", gpt->oos_page->id,
  969. gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
  970. list_del_init(&gpt->oos_page->vm_list);
  971. return sync_oos_page(vgpu, gpt->oos_page);
  972. }
  973. static int ppgtt_allocate_oos_page(struct intel_vgpu *vgpu,
  974. struct intel_vgpu_guest_page *gpt)
  975. {
  976. struct intel_gvt *gvt = vgpu->gvt;
  977. struct intel_gvt_gtt *gtt = &gvt->gtt;
  978. struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
  979. int ret;
  980. WARN(oos_page, "shadow PPGTT page already has an oos page\n");
  981. if (list_empty(&gtt->oos_page_free_list_head)) {
  982. oos_page = container_of(gtt->oos_page_use_list_head.next,
  983. struct intel_vgpu_oos_page, list);
  984. ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
  985. if (ret)
  986. return ret;
  987. ret = detach_oos_page(vgpu, oos_page);
  988. if (ret)
  989. return ret;
  990. } else
  991. oos_page = container_of(gtt->oos_page_free_list_head.next,
  992. struct intel_vgpu_oos_page, list);
  993. return attach_oos_page(vgpu, oos_page, gpt);
  994. }
  995. static int ppgtt_set_guest_page_oos(struct intel_vgpu *vgpu,
  996. struct intel_vgpu_guest_page *gpt)
  997. {
  998. struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
  999. if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
  1000. return -EINVAL;
  1001. trace_oos_change(vgpu->id, "set page out of sync", gpt->oos_page->id,
  1002. gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
  1003. list_add_tail(&oos_page->vm_list, &vgpu->gtt.oos_page_list_head);
  1004. return intel_gvt_hypervisor_unset_wp_page(vgpu, gpt);
  1005. }
  1006. /**
  1007. * intel_vgpu_sync_oos_pages - sync all the out-of-sync shadow pages for a vGPU
  1008. * @vgpu: a vGPU
  1009. *
  1010. * This function is called before submitting a guest workload to the host,
  1011. * to sync all the out-of-sync shadow pages for the vGPU.
  1012. *
  1013. * Returns:
  1014. * Zero on success, negative error code if failed.
  1015. */
  1016. int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
  1017. {
  1018. struct list_head *pos, *n;
  1019. struct intel_vgpu_oos_page *oos_page;
  1020. int ret;
  1021. if (!enable_out_of_sync)
  1022. return 0;
  1023. list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
  1024. oos_page = container_of(pos,
  1025. struct intel_vgpu_oos_page, vm_list);
  1026. ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
  1027. if (ret)
  1028. return ret;
  1029. }
  1030. return 0;
  1031. }
  1032. /*
  1033. * The heart of PPGTT shadow page table.
  1034. */
  1035. static int ppgtt_handle_guest_write_page_table(
  1036. struct intel_vgpu_guest_page *gpt,
  1037. struct intel_gvt_gtt_entry *we, unsigned long index)
  1038. {
  1039. struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
  1040. struct intel_vgpu *vgpu = spt->vgpu;
  1041. struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
  1042. int ret;
  1043. int new_present;
  1044. new_present = ops->test_present(we);
  1045. ret = ppgtt_handle_guest_entry_removal(gpt, index);
  1046. if (ret)
  1047. goto fail;
  1048. if (new_present) {
  1049. ret = ppgtt_handle_guest_entry_add(gpt, we, index);
  1050. if (ret)
  1051. goto fail;
  1052. }
  1053. return 0;
  1054. fail:
  1055. gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d.\n",
  1056. vgpu->id, spt, we->val64, we->type);
  1057. return ret;
  1058. }
  1059. static inline bool can_do_out_of_sync(struct intel_vgpu_guest_page *gpt)
  1060. {
  1061. return enable_out_of_sync
  1062. && gtt_type_is_pte_pt(
  1063. guest_page_to_ppgtt_spt(gpt)->guest_page_type)
  1064. && gpt->write_cnt >= 2;
  1065. }
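/*
 * can_do_out_of_sync() above gates the out-of-sync optimization: only
 * leaf (PTE-level) shadow pages qualify, and only after the guest has
 * written the page at least twice, on the assumption that dropping
 * write protection is only worthwhile for frequently modified page
 * tables. The accumulated changes are reconciled later from
 * intel_vgpu_sync_oos_pages().
 */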
  1066. static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
  1067. unsigned long index)
  1068. {
  1069. set_bit(index, spt->post_shadow_bitmap);
  1070. if (!list_empty(&spt->post_shadow_list))
  1071. return;
  1072. list_add_tail(&spt->post_shadow_list,
  1073. &spt->vgpu->gtt.post_shadow_list_head);
  1074. }
  1075. /**
  1076. * intel_vgpu_flush_post_shadow - flush the post shadow transactions
  1077. * @vgpu: a vGPU
  1078. *
  1079. * This function is called before submitting a guest workload to the host,
  1080. * to flush all the pending post-shadow updates for a vGPU.
  1081. *
  1082. * Returns:
  1083. * Zero on success, negative error code if failed.
  1084. */
  1085. int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
  1086. {
  1087. struct list_head *pos, *n;
  1088. struct intel_vgpu_ppgtt_spt *spt;
  1089. struct intel_gvt_gtt_entry ge;
  1090. unsigned long index;
  1091. int ret;
  1092. list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
  1093. spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
  1094. post_shadow_list);
  1095. for_each_set_bit(index, spt->post_shadow_bitmap,
  1096. GTT_ENTRY_NUM_IN_ONE_PAGE) {
  1097. ppgtt_get_guest_entry(spt, &ge, index);
  1098. ret = ppgtt_handle_guest_write_page_table(
  1099. &spt->guest_page, &ge, index);
  1100. if (ret)
  1101. return ret;
  1102. clear_bit(index, spt->post_shadow_bitmap);
  1103. }
  1104. list_del_init(&spt->post_shadow_list);
  1105. }
  1106. return 0;
  1107. }
  1108. static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
  1109. u64 pa, void *p_data, int bytes)
  1110. {
  1111. struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
  1112. struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
  1113. struct intel_vgpu *vgpu = spt->vgpu;
  1114. struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
  1115. const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
  1116. struct intel_gvt_gtt_entry we;
  1117. unsigned long index;
  1118. int ret;
  1119. index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;
  1120. ppgtt_get_guest_entry(spt, &we, index);
  1121. ops->test_pse(&we);
  1122. if (bytes == info->gtt_entry_size) {
  1123. ret = ppgtt_handle_guest_write_page_table(gpt, &we, index);
  1124. if (ret)
  1125. return ret;
  1126. } else {
  1127. if (!test_bit(index, spt->post_shadow_bitmap)) {
  1128. ret = ppgtt_handle_guest_entry_removal(gpt, index);
  1129. if (ret)
  1130. return ret;
  1131. }
  1132. ppgtt_set_post_shadow(spt, index);
  1133. }
  1134. if (!enable_out_of_sync)
  1135. return 0;
  1136. gpt->write_cnt++;
  1137. if (gpt->oos_page)
  1138. ops->set_entry(gpt->oos_page->mem, &we, index,
  1139. false, 0, vgpu);
  1140. if (can_do_out_of_sync(gpt)) {
  1141. if (!gpt->oos_page)
  1142. ppgtt_allocate_oos_page(vgpu, gpt);
  1143. ret = ppgtt_set_guest_page_oos(vgpu, gpt);
  1144. if (ret < 0)
  1145. return ret;
  1146. }
  1147. return 0;
  1148. }
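/*
 * Write handling above: a write that covers a full GTT entry is
 * shadowed immediately, while a partial write only tears down the old
 * mapping and marks the slot in post_shadow_bitmap; the final value is
 * picked up later by intel_vgpu_flush_post_shadow(). When the
 * out-of-sync path is enabled, pages written often enough have their
 * write protection dropped via ppgtt_set_guest_page_oos().
 */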
  1149. /*
  1150. * mm page table allocation policy for bdw+
  1151. * - for ggtt, only virtual page table will be allocated.
  1152. * - for ppgtt, dedicated virtual/shadow page table will be allocated.
  1153. */
  1154. static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
  1155. {
  1156. struct intel_vgpu *vgpu = mm->vgpu;
  1157. struct intel_gvt *gvt = vgpu->gvt;
  1158. const struct intel_gvt_device_info *info = &gvt->device_info;
  1159. void *mem;
  1160. if (mm->type == INTEL_GVT_MM_PPGTT) {
  1161. mm->page_table_entry_cnt = 4;
  1162. mm->page_table_entry_size = mm->page_table_entry_cnt *
  1163. info->gtt_entry_size;
  1164. mem = kzalloc(mm->has_shadow_page_table ?
  1165. mm->page_table_entry_size * 2
  1166. : mm->page_table_entry_size,
  1167. GFP_ATOMIC);
  1168. if (!mem)
  1169. return -ENOMEM;
  1170. mm->virtual_page_table = mem;
  1171. if (!mm->has_shadow_page_table)
  1172. return 0;
  1173. mm->shadow_page_table = mem + mm->page_table_entry_size;
  1174. } else if (mm->type == INTEL_GVT_MM_GGTT) {
  1175. mm->page_table_entry_cnt =
  1176. (gvt_ggtt_gm_sz(gvt) >> GTT_PAGE_SHIFT);
  1177. mm->page_table_entry_size = mm->page_table_entry_cnt *
  1178. info->gtt_entry_size;
  1179. mem = vzalloc(mm->page_table_entry_size);
  1180. if (!mem)
  1181. return -ENOMEM;
  1182. mm->virtual_page_table = mem;
  1183. }
  1184. return 0;
  1185. }
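/*
 * Sizing note for gen8_mm_alloc_page_table() above: a PPGTT mm only
 * stores its root pointers (4 entries of gtt_entry_size bytes,
 * duplicated when a shadow table is kept), whereas a GGTT mm allocates
 * one virtual entry per GTT page of the global graphics memory, hence
 * the vzalloc().
 */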
  1186. static void gen8_mm_free_page_table(struct intel_vgpu_mm *mm)
  1187. {
  1188. if (mm->type == INTEL_GVT_MM_PPGTT) {
  1189. kfree(mm->virtual_page_table);
  1190. } else if (mm->type == INTEL_GVT_MM_GGTT) {
  1191. if (mm->virtual_page_table)
  1192. vfree(mm->virtual_page_table);
  1193. }
  1194. mm->virtual_page_table = mm->shadow_page_table = NULL;
  1195. }
  1196. static void invalidate_mm(struct intel_vgpu_mm *mm)
  1197. {
  1198. struct intel_vgpu *vgpu = mm->vgpu;
  1199. struct intel_gvt *gvt = vgpu->gvt;
  1200. struct intel_gvt_gtt *gtt = &gvt->gtt;
  1201. struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
  1202. struct intel_gvt_gtt_entry se;
  1203. int i;
  1204. if (WARN_ON(!mm->has_shadow_page_table || !mm->shadowed))
  1205. return;
  1206. for (i = 0; i < mm->page_table_entry_cnt; i++) {
  1207. ppgtt_get_shadow_root_entry(mm, &se, i);
  1208. if (!ops->test_present(&se))
  1209. continue;
  1210. ppgtt_invalidate_shadow_page_by_shadow_entry(
  1211. vgpu, &se);
  1212. se.val64 = 0;
  1213. ppgtt_set_shadow_root_entry(mm, &se, i);
  1214. trace_gpt_change(vgpu->id, "destroy root pointer",
  1215. NULL, se.type, se.val64, i);
  1216. }
  1217. mm->shadowed = false;
  1218. }
  1219. /**
  1220. * intel_vgpu_destroy_mm - destroy a mm object
  1221. * @mm: a kref object
  1222. *
  1223. * This function is used to destroy a mm object for a vGPU
  1224. *
  1225. */
  1226. void intel_vgpu_destroy_mm(struct kref *mm_ref)
  1227. {
  1228. struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
  1229. struct intel_vgpu *vgpu = mm->vgpu;
  1230. struct intel_gvt *gvt = vgpu->gvt;
  1231. struct intel_gvt_gtt *gtt = &gvt->gtt;
  1232. if (!mm->initialized)
  1233. goto out;
  1234. list_del(&mm->list);
  1235. list_del(&mm->lru_list);
  1236. if (mm->has_shadow_page_table)
  1237. invalidate_mm(mm);
  1238. gtt->mm_free_page_table(mm);
  1239. out:
  1240. kfree(mm);
  1241. }
  1242. static int shadow_mm(struct intel_vgpu_mm *mm)
  1243. {
  1244. struct intel_vgpu *vgpu = mm->vgpu;
  1245. struct intel_gvt *gvt = vgpu->gvt;
  1246. struct intel_gvt_gtt *gtt = &gvt->gtt;
  1247. struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
  1248. struct intel_vgpu_ppgtt_spt *spt;
  1249. struct intel_gvt_gtt_entry ge, se;
  1250. int i;
  1251. int ret;
  1252. if (WARN_ON(!mm->has_shadow_page_table || mm->shadowed))
  1253. return 0;
  1254. mm->shadowed = true;
  1255. for (i = 0; i < mm->page_table_entry_cnt; i++) {
  1256. ppgtt_get_guest_root_entry(mm, &ge, i);
  1257. if (!ops->test_present(&ge))
  1258. continue;
  1259. trace_gpt_change(vgpu->id, __func__, NULL,
  1260. ge.type, ge.val64, i);
  1261. spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
  1262. if (IS_ERR(spt)) {
  1263. gvt_err("fail to populate guest root pointer\n");
  1264. ret = PTR_ERR(spt);
  1265. goto fail;
  1266. }
  1267. ppgtt_generate_shadow_entry(&se, spt, &ge);
  1268. ppgtt_set_shadow_root_entry(mm, &se, i);
  1269. trace_gpt_change(vgpu->id, "populate root pointer",
  1270. NULL, se.type, se.val64, i);
  1271. }
  1272. return 0;
  1273. fail:
  1274. invalidate_mm(mm);
  1275. return ret;
  1276. }
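/*
 * shadow_mm() above builds the shadow page tables for a PPGTT mm: for
 * every present guest root entry it populates a shadow page table tree
 * and installs a root entry that points at the shadow page's machine
 * frame, so the hardware only ever walks host-managed tables.
 */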
  1277. /**
  1278. * intel_vgpu_create_mm - create a mm object for a vGPU
  1279. * @vgpu: a vGPU
  1280. * @mm_type: mm object type, should be PPGTT or GGTT
  1281. * @virtual_page_table: page table root pointers. May be NULL if the user wants
  1282. * to populate the shadow page table later.
  1283. * @page_table_level: describes the page table level of the mm object
  1284. * @pde_base_index: pde root pointer base in GGTT MMIO.
  1285. *
  1286. * This function is used to create a mm object for a vGPU.
  1287. *
  1288. * Returns:
  1289. * The new mm object on success, or an ERR_PTR-encoded negative error code if failed.
  1290. */
  1291. struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
  1292. int mm_type, void *virtual_page_table, int page_table_level,
  1293. u32 pde_base_index)
  1294. {
  1295. struct intel_gvt *gvt = vgpu->gvt;
  1296. struct intel_gvt_gtt *gtt = &gvt->gtt;
  1297. struct intel_vgpu_mm *mm;
  1298. int ret;
  1299. mm = kzalloc(sizeof(*mm), GFP_ATOMIC);
  1300. if (!mm) {
  1301. ret = -ENOMEM;
  1302. goto fail;
  1303. }
  1304. mm->type = mm_type;
  1305. if (page_table_level == 1)
  1306. mm->page_table_entry_type = GTT_TYPE_GGTT_PTE;
  1307. else if (page_table_level == 3)
  1308. mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
  1309. else if (page_table_level == 4)
  1310. mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
  1311. else {
  1312. WARN_ON(1);
  1313. ret = -EINVAL;
  1314. goto fail;
  1315. }
  1316. mm->page_table_level = page_table_level;
  1317. mm->pde_base_index = pde_base_index;
  1318. mm->vgpu = vgpu;
  1319. mm->has_shadow_page_table = !!(mm_type == INTEL_GVT_MM_PPGTT);
	kref_init(&mm->ref);
	atomic_set(&mm->pincount, 0);
	INIT_LIST_HEAD(&mm->list);
	INIT_LIST_HEAD(&mm->lru_list);
	list_add_tail(&mm->list, &vgpu->gtt.mm_list_head);

	ret = gtt->mm_alloc_page_table(mm);
	if (ret) {
		gvt_err("fail to allocate page table for mm\n");
		goto fail;
	}

	mm->initialized = true;

	if (virtual_page_table)
		memcpy(mm->virtual_page_table, virtual_page_table,
				mm->page_table_entry_size);

	if (mm->has_shadow_page_table) {
		ret = shadow_mm(mm);
		if (ret)
			goto fail;
		list_add_tail(&mm->lru_list, &gvt->gtt.mm_lru_list_head);
	}
	return mm;
fail:
	gvt_err("fail to create mm\n");
	if (mm)
		intel_gvt_mm_unreference(mm);
	return ERR_PTR(ret);
}
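/*
 * Typical callers in this file: intel_vgpu_init_gtt() creates the single
 * GGTT mm (page_table_level == 1, no root pointers yet), and
 * intel_vgpu_g2v_create_ppgtt_mm() creates PPGTT mm objects from the root
 * pointers a guest announces through a g2v notification.
 */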
/**
 * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
 * @mm: a vGPU mm object
 *
 * This function is called when a user no longer wants to use a vGPU mm object.
 */
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
{
	if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
		return;

	atomic_dec(&mm->pincount);
}
/**
 * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
 * @mm: a vGPU mm object
 *
 * This function is called when a user wants to use a vGPU mm object. If this
 * mm object hasn't been shadowed yet, the shadow will be populated at this
 * time.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
{
	int ret;

	if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
		return 0;

	atomic_inc(&mm->pincount);

	if (!mm->shadowed) {
		ret = shadow_mm(mm);
		if (ret)
			return ret;
	}

	list_del_init(&mm->lru_list);
	list_add_tail(&mm->lru_list, &mm->vgpu->gvt->gtt.mm_lru_list_head);
	return 0;
}
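/*
 * mm_lru_list_head is kept in LRU order: intel_vgpu_pin_mm() above moves
 * an mm to the tail when it is used, and reclaim_one_mm() below scans from
 * the head, dropping the shadow page tables of the least recently used,
 * unpinned PPGTT mm it finds.
 */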
static int reclaim_one_mm(struct intel_gvt *gvt)
{
	struct intel_vgpu_mm *mm;
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &gvt->gtt.mm_lru_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, lru_list);

		if (mm->type != INTEL_GVT_MM_PPGTT)
			continue;
		if (atomic_read(&mm->pincount))
			continue;

		list_del_init(&mm->lru_list);
		invalidate_mm(mm);
		return 1;
	}
	return 0;
}
/*
 * GMA translation APIs.
 */
static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;

	if (WARN_ON(!mm->has_shadow_page_table))
		return -EINVAL;

	s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
	if (!s)
		return -ENXIO;

	if (!guest)
		ppgtt_get_shadow_entry(s, e, index);
	else
		ppgtt_get_guest_entry(s, e, index);
	return 0;
}
/**
 * intel_vgpu_gma_to_gpa - translate a gma to GPA
 * @mm: mm object. could be a PPGTT or GGTT mm object
 * @gma: graphics memory address in this mm object
 *
 * This function is used to translate a graphics memory address in a specific
 * graphics memory space to a guest physical address.
 *
 * Returns:
 * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
 */
unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
	struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
	unsigned long gpa = INTEL_GVT_INVALID_ADDR;
	unsigned long gma_index[4];
	struct intel_gvt_gtt_entry e;
	int i, index;
	int ret;

	if (mm->type != INTEL_GVT_MM_GGTT && mm->type != INTEL_GVT_MM_PPGTT)
		return INTEL_GVT_INVALID_ADDR;

	if (mm->type == INTEL_GVT_MM_GGTT) {
		if (!vgpu_gmadr_is_valid(vgpu, gma))
			goto err;

		ggtt_get_guest_entry(mm, &e,
				gma_ops->gma_to_ggtt_pte_index(gma));
		gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
			+ (gma & ~GTT_PAGE_MASK);

		trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
		return gpa;
	}
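	/*
	 * PPGTT: pre-compute the per-level indexes derived from the gma,
	 * then walk down the shadow page table. 'index' is the number of
	 * lookups remaining below the root entry fetched here.
	 */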
	switch (mm->page_table_level) {
	case 4:
		ppgtt_get_shadow_root_entry(mm, &e, 0);
		gma_index[0] = gma_ops->gma_to_pml4_index(gma);
		gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
		gma_index[2] = gma_ops->gma_to_pde_index(gma);
		gma_index[3] = gma_ops->gma_to_pte_index(gma);
		index = 4;
		break;
	case 3:
		ppgtt_get_shadow_root_entry(mm, &e,
				gma_ops->gma_to_l3_pdp_index(gma));
		gma_index[0] = gma_ops->gma_to_pde_index(gma);
		gma_index[1] = gma_ops->gma_to_pte_index(gma);
		index = 2;
		break;
	case 2:
		ppgtt_get_shadow_root_entry(mm, &e,
				gma_ops->gma_to_pde_index(gma));
		gma_index[0] = gma_ops->gma_to_pte_index(gma);
		index = 1;
		break;
	default:
		WARN_ON(1);
		goto err;
	}
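	/*
	 * The last lookup reads the guest PTE (guest == true) rather than
	 * the shadow PTE, so the pfn extracted below is a guest page frame
	 * and the computed result is a guest physical address.
	 */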
	/* walk into the shadow page table and get gpa from guest entry */
	for (i = 0; i < index; i++) {
		ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
				(i == index - 1));
		if (ret)
			goto err;
	}

	gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
		+ (gma & ~GTT_PAGE_MASK);

	trace_gma_translate(vgpu->id, "ppgtt", 0,
			mm->page_table_level, gma, gpa);
	return gpa;
err:
	gvt_err("invalid mm type: %d gma %lx\n", mm->type, gma);
	return INTEL_GVT_INVALID_ADDR;
}
static int emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
	unsigned int off, void *p_data, unsigned int bytes)
{
	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	unsigned long index = off >> info->gtt_entry_size_shift;
	struct intel_gvt_gtt_entry e;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;
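	/*
	 * A 4-byte access reads only part of a GTT entry, so copy out from
	 * the cached entry value at the offset the guest actually asked for.
	 */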
	ggtt_get_guest_entry(ggtt_mm, &e, index);
	memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
			bytes);
	return 0;
}
/**
 * intel_vgpu_emulate_gtt_mmio_read - emulate GTT MMIO register read
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data will be returned to guest
 * @bytes: data length
 *
 * This function is used to emulate the GTT MMIO register read
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
	void *p_data, unsigned int bytes)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	off -= info->gtt_start_offset;
	ret = emulate_gtt_mmio_read(vgpu, off, p_data, bytes);
	return ret;
}
static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
	void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
	unsigned long gma;
	struct intel_gvt_gtt_entry e, m;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	gma = g_gtt_index << GTT_PAGE_SHIFT;

	/* the VM may configure the whole GM space when ballooning is used */
	if (WARN_ONCE(!vgpu_gmadr_is_valid(vgpu, gma),
			"vgpu%d: found oob ggtt write, offset %x\n",
			vgpu->id, off)) {
		return 0;
	}

	ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);

	memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
			bytes);
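	/*
	 * If the resulting guest entry is present, translate its guest pfn
	 * to a host mfn for the shadow entry; otherwise write a cleared
	 * shadow entry for that slot.
	 */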
	if (ops->test_present(&e)) {
		ret = gtt_entry_p2m(vgpu, &e, &m);
		if (ret) {
			gvt_err("vgpu%d: fail to translate guest gtt entry\n",
					vgpu->id);
			return ret;
		}
	} else {
		m = e;
		m.val64 = 0;
	}

	ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
	ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
	return 0;
}
/**
 * intel_vgpu_emulate_gtt_mmio_write - emulate GTT MMIO register write
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data from guest write
 * @bytes: data length
 *
 * This function is used to emulate the GTT MMIO register write
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
	void *p_data, unsigned int bytes)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	off -= info->gtt_start_offset;
	ret = emulate_gtt_mmio_write(vgpu, off, p_data, bytes);
	return ret;
}
static int alloc_scratch_pages(struct intel_vgpu *vgpu,
		intel_gvt_gtt_type_t type)
{
	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	int page_entry_num = GTT_PAGE_SIZE >>
				vgpu->gvt->device_info.gtt_entry_size_shift;
	struct page *scratch_pt;
	unsigned long mfn;
	int i;
	void *p;

	if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
		return -EINVAL;

	scratch_pt = alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
	if (!scratch_pt) {
		gvt_err("fail to allocate scratch page\n");
		return -ENOMEM;
	}

	p = kmap_atomic(scratch_pt);
	mfn = intel_gvt_hypervisor_virt_to_mfn(p);
	if (mfn == INTEL_GVT_INVALID_ADDR) {
		gvt_err("fail to translate vaddr:0x%llx\n", (u64)p);
		kunmap_atomic(p);
		__free_page(scratch_pt);
		return -EFAULT;
	}
	gtt->scratch_pt[type].page_mfn = mfn;
	gtt->scratch_pt[type].page = scratch_pt;
	gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
			vgpu->id, type, mfn);

	/* Build the tree by filling the scratch pt with entries that point
	 * to the next-level scratch pt or scratch page. scratch_pt[type]
	 * indicates the scratch pt/scratch page used by the 'type' pt.
	 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by the
	 * GTT_TYPE_PPGTT_PDE_PT level pt, which means this scratch_pt
	 * itself is of type GTT_TYPE_PPGTT_PTE_PT and is filled with the
	 * scratch page mfn.
	 */
	if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
		struct intel_gvt_gtt_entry se;

		memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
		se.type = get_entry_type(type - 1);
		ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);

		/* The entry parameters (present/writeable/cache type) are
		 * set the same as in i915's scratch page tree.
		 */
		se.val64 |= _PAGE_PRESENT | _PAGE_RW;
		if (type == GTT_TYPE_PPGTT_PDE_PT)
			se.val64 |= PPAT_CACHED_INDEX;

		for (i = 0; i < page_entry_num; i++)
			ops->set_entry(p, &se, i, false, 0, vgpu);
	}

	kunmap_atomic(p);
	return 0;
}
static int release_scratch_page_tree(struct intel_vgpu *vgpu)
{
	int i;

	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
		if (vgpu->gtt.scratch_pt[i].page != NULL) {
			__free_page(vgpu->gtt.scratch_pt[i].page);
			vgpu->gtt.scratch_pt[i].page = NULL;
			vgpu->gtt.scratch_pt[i].page_mfn = 0;
		}
	}
	return 0;
}
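/*
 * Scratch tables are built bottom-up: alloc_scratch_pages() for a given
 * level points its entries at scratch_pt[type - 1], so the loop below must
 * have allocated the lower level before it moves on to the next one.
 */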
static int create_scratch_page_tree(struct intel_vgpu *vgpu)
{
	int i, ret;

	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
		ret = alloc_scratch_pages(vgpu, i);
		if (ret)
			goto err;
	}
	return 0;
err:
	release_scratch_page_tree(vgpu);
	return ret;
}
/**
 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to initialize per-vGPU graphics memory virtualization
 * components.
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
	struct intel_vgpu_mm *ggtt_mm;

	hash_init(gtt->guest_page_hash_table);
	hash_init(gtt->shadow_page_hash_table);

	INIT_LIST_HEAD(&gtt->mm_list_head);
	INIT_LIST_HEAD(&gtt->oos_page_list_head);
	INIT_LIST_HEAD(&gtt->post_shadow_list_head);

	ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
			NULL, 1, 0);
	if (IS_ERR(ggtt_mm)) {
		gvt_err("fail to create mm for ggtt.\n");
		return PTR_ERR(ggtt_mm);
	}

	gtt->ggtt_mm = ggtt_mm;
	return create_scratch_page_tree(vgpu);
}
/**
 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to clean up per-vGPU graphics memory virtualization
 * components.
 */
void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_mm *mm;

	ppgtt_free_all_shadow_page(vgpu);
	release_scratch_page_tree(vgpu);

	list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, list);
		vgpu->gvt->gtt.mm_free_page_table(mm);
		list_del(&mm->list);
		list_del(&mm->lru_list);
		kfree(mm);
	}
}
static void clean_spt_oos(struct intel_gvt *gvt)
{
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct list_head *pos, *n;
	struct intel_vgpu_oos_page *oos_page;

	WARN(!list_empty(&gtt->oos_page_use_list_head),
			"someone is still using oos page\n");

	list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
		oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
		list_del(&oos_page->list);
		kfree(oos_page);
	}
}
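/*
 * A fixed pool of out-of-sync (oos) pages backs the oos machinery:
 * setup_spt_oos() below preallocates them onto oos_page_free_list_head,
 * and clean_spt_oos() above drains that list, warning if any page is
 * still sitting on the use list at teardown time.
 */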
static int setup_spt_oos(struct intel_gvt *gvt)
{
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_oos_page *oos_page;
	int i;
	int ret;

	INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
	INIT_LIST_HEAD(&gtt->oos_page_use_list_head);

	for (i = 0; i < preallocated_oos_pages; i++) {
		oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
		if (!oos_page) {
			gvt_err("fail to pre-allocate oos page\n");
			ret = -ENOMEM;
			goto fail;
		}

		INIT_LIST_HEAD(&oos_page->list);
		INIT_LIST_HEAD(&oos_page->vm_list);
		oos_page->id = i;
		list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
	}

	gvt_dbg_mm("%d oos pages preallocated\n", i);

	return 0;
fail:
	clean_spt_oos(gvt);
	return ret;
}
/**
 * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
 * @vgpu: a vGPU
 * @page_table_level: PPGTT page table level
 * @root_entry: PPGTT page table root pointers
 *
 * This function is used to find a PPGTT mm object from mm object pool
 *
 * Returns:
 * pointer to mm object on success, NULL if failed.
 */
struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
		int page_table_level, void *root_entry)
{
	struct list_head *pos;
	struct intel_vgpu_mm *mm;
	u64 *src, *dst;
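	/*
	 * Match on the guest root pointers: a 3-level PPGTT is identified
	 * by its four PDP entries, a 4-level PPGTT by its single PML4
	 * pointer.
	 */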
	list_for_each(pos, &vgpu->gtt.mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, list);
		if (mm->type != INTEL_GVT_MM_PPGTT)
			continue;

		if (mm->page_table_level != page_table_level)
			continue;

		src = root_entry;
		dst = mm->virtual_page_table;

		if (page_table_level == 3) {
			if (src[0] == dst[0]
					&& src[1] == dst[1]
					&& src[2] == dst[2]
					&& src[3] == dst[3])
				return mm;
		} else {
			if (src[0] == dst[0])
				return mm;
		}
	}
	return NULL;
}
/**
 * intel_vgpu_g2v_create_ppgtt_mm - create a PPGTT mm object from
 * g2v notification
 * @vgpu: a vGPU
 * @page_table_level: PPGTT page table level
 *
 * This function is used to create a PPGTT mm object from a guest to GVT-g
 * notification.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
		int page_table_level)
{
	u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
	struct intel_vgpu_mm *mm;

	if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
		return -EINVAL;
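	/*
	 * The guest publishes its PPGTT root pointers through the PV info
	 * registers (vgtif_reg(pdp[0])). Reuse an existing mm with the same
	 * roots by taking a reference; otherwise shadow a new one.
	 */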
	mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
	if (mm) {
		intel_gvt_mm_reference(mm);
	} else {
		mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
				pdp, page_table_level, 0);
		if (IS_ERR(mm)) {
			gvt_err("fail to create mm\n");
			return PTR_ERR(mm);
		}
	}
	return 0;
}
/**
 * intel_vgpu_g2v_destroy_ppgtt_mm - destroy a PPGTT mm object from
 * g2v notification
 * @vgpu: a vGPU
 * @page_table_level: PPGTT page table level
 *
 * This function is used to destroy a PPGTT mm object from a guest to GVT-g
 * notification.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
		int page_table_level)
{
	u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
	struct intel_vgpu_mm *mm;

	if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
		return -EINVAL;

	mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
	if (!mm) {
		gvt_err("fail to find ppgtt instance.\n");
		return -EINVAL;
	}
	intel_gvt_mm_unreference(mm);
	return 0;
}
/**
 * intel_gvt_init_gtt - initialize mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the initialization stage, to initialize
 * the mm components of a GVT device.
 *
 * Returns:
 * zero on success, negative error code if failed.
 */
int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
	int ret;

	gvt_dbg_core("init gtt\n");

	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
		gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
		gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
		gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table;
		gvt->gtt.mm_free_page_table = gen8_mm_free_page_table;
	} else {
		return -ENODEV;
	}

	if (enable_out_of_sync) {
		ret = setup_spt_oos(gvt);
		if (ret) {
			gvt_err("fail to initialize SPT oos\n");
			return ret;
		}
	}

	INIT_LIST_HEAD(&gvt->gtt.mm_lru_list_head);
	return 0;
}
/**
 * intel_gvt_clean_gtt - clean up mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the driver unloading stage, to clean up the
 * mm components of a GVT device.
 */
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
	if (enable_out_of_sync)
		clean_spt_oos(gvt);
}