mtrr.c

/*
 * vMTRR implementation
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright(C) 2015 Intel Corporation.
 *
 * Authors:
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Avi Kivity <avi@qumranet.com>
 *   Marcelo Tosatti <mtosatti@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/kvm_host.h>
#include <asm/mtrr.h>

#include "cpuid.h"
#include "mmu.h"

#define IA32_MTRR_DEF_TYPE_E		(1ULL << 11)
#define IA32_MTRR_DEF_TYPE_FE		(1ULL << 10)
#define IA32_MTRR_DEF_TYPE_TYPE_MASK	(0xff)
static bool msr_mtrr_valid(unsigned msr)
{
	switch (msr) {
	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
	case MSR_MTRRfix64K_00000:
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:
	case MSR_MTRRdefType:
	case MSR_IA32_CR_PAT:
		return true;
	case 0x2f8:
		return true;
	}
	return false;
}
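
/*
 * Worked example (editor's note, not in the original file): with
 * KVM_NR_VAR_MTRR == 8, the first case label above covers MSRs
 * 0x200 ... 0x20f, i.e. the eight MTRRphysBase/MTRRphysMask pairs
 * (MTRRphysBase0 = 0x200, MTRRphysMask0 = 0x201, ...,
 *  MTRRphysBase7 = 0x20e, MTRRphysMask7 = 0x20f).
 */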

static bool valid_pat_type(unsigned t)
{
	return t < 8 && (1 << t) & 0xf3;	/* 0, 1, 4, 5, 6, 7 */
}

static bool valid_mtrr_type(unsigned t)
{
	return t < 8 && (1 << t) & 0x73;	/* 0, 1, 4, 5, 6 */
}
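
/*
 * Worked example (editor's note): 0x73 is 0b01110011, so only the memory
 * types 0 (UC), 1 (WC), 4 (WT), 5 (WP) and 6 (WB) pass valid_mtrr_type().
 * For the reserved type t = 2, (1 << 2) & 0x73 == 0, so it is rejected.
 * valid_pat_type() additionally accepts 7 (UC-), which is PAT-only.
 */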

bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int i;
	u64 mask;

	if (!msr_mtrr_valid(msr))
		return false;

	if (msr == MSR_IA32_CR_PAT) {
		for (i = 0; i < 8; i++)
			if (!valid_pat_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	} else if (msr == MSR_MTRRdefType) {
		if (data & ~0xcff)
			return false;
		return valid_mtrr_type(data & 0xff);
	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
		for (i = 0; i < 8; i++)
			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	}

	/* variable MTRRs */
	WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR));

	mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
	if ((msr & 1) == 0) {
		/* MTRR base */
		if (!valid_mtrr_type(data & 0xff))
			return false;
		mask |= 0xf00;
	} else
		/* MTRR mask */
		mask |= 0x7ff;
	if (data & mask) {
		kvm_inject_gp(vcpu, 0);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_valid);
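
/*
 * Worked example (editor's note): with cpuid_maxphyaddr() == 36, the
 * reserved-bit mask starts as ~0ULL << 36 = 0xfffffff000000000.
 *   - MTRRphysBase (even MSR): mask |= 0xf00, so bits 8-11 and everything
 *     at or above bit 36 must be zero; 0xc0000006 (base 0xc0000000,
 *     type WB) passes.
 *   - MTRRphysMask (odd MSR): mask |= 0x7ff, so bits 0-10 must be zero;
 *     0xfc0000800 (a 1GB PhysMask with the valid bit 11 set) passes.
 * Any write that hits a reserved bit injects #GP and is rejected.
 */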

static bool mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
{
	return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_E);
}

static bool fixed_mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
{
	return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_FE);
}

static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
{
	return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
}

static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
{
	/*
	 * Intel SDM 11.11.2.2: all MTRRs are disabled when
	 * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
	 * memory type is applied to all of physical memory.
	 *
	 * However, virtual machines can be run with CPUID such that
	 * there are no MTRRs.  In that case, the firmware will never
	 * enable MTRRs and it is obviously undesirable to run the
	 * guest entirely with UC memory, so we use WB instead.
	 */
	if (guest_cpuid_has_mtrr(vcpu))
		return MTRR_TYPE_UNCACHABLE;
	else
		return MTRR_TYPE_WRBACK;
}

/*
 * Three terms are used in the following code:
 * - segment: one of the address segments covered by the fixed MTRRs.
 * - unit: one MSR entry within a segment.
 * - range: an area covered by a single memory cache type.
 */
struct fixed_mtrr_segment {
	u64 start;
	u64 end;

	int range_shift;

	/* the start position in kvm_mtrr.fixed_ranges[]. */
	int range_start;
};

static struct fixed_mtrr_segment fixed_seg_table[] = {
	/* MSR_MTRRfix64K_00000, 1 unit. 64K fixed mtrr. */
	{
		.start = 0x0,
		.end = 0x80000,
		.range_shift = 16, /* 64K */
		.range_start = 0,
	},

	/*
	 * MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000, 2 units,
	 * 16K fixed mtrr.
	 */
	{
		.start = 0x80000,
		.end = 0xc0000,
		.range_shift = 14, /* 16K */
		.range_start = 8,
	},

	/*
	 * MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000, 8 units,
	 * 4K fixed mtrr.
	 */
	{
		.start = 0xc0000,
		.end = 0x100000,
		.range_shift = 12, /* 4K */
		.range_start = 24,
	}
};
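
/*
 * Worked example (editor's note): the table above describes
 *   seg 0: [0x0, 0x80000)      in 64K steps ->  8 ranges, indices  0-7
 *   seg 1: [0x80000, 0xc0000)  in 16K steps -> 16 ranges, indices  8-23
 *   seg 2: [0xc0000, 0x100000) in  4K steps -> 64 ranges, indices 24-87
 * for a total of 88 entries in kvm_mtrr.fixed_ranges[], one byte of
 * memory type per range.
 */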

/*
 * Each unit is covered by one MSR; an MSR entry holds 8 ranges, so the
 * unit size is always 8 * 2^range_shift.
 */
static u64 fixed_mtrr_seg_unit_size(int seg)
{
	return 8 << fixed_seg_table[seg].range_shift;
}

static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit)
{
	switch (msr) {
	case MSR_MTRRfix64K_00000:
		*seg = 0;
		*unit = 0;
		break;
	case MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000:
		*seg = 1;
		*unit = msr - MSR_MTRRfix16K_80000;
		break;
	case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000:
		*seg = 2;
		*unit = msr - MSR_MTRRfix4K_C0000;
		break;
	default:
		return false;
	}

	return true;
}

static void fixed_mtrr_seg_unit_range(int seg, int unit, u64 *start, u64 *end)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	u64 unit_size = fixed_mtrr_seg_unit_size(seg);

	*start = mtrr_seg->start + unit * unit_size;
	*end = *start + unit_size;
	WARN_ON(*end > mtrr_seg->end);
}
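
/*
 * Worked example (editor's note): MSR_MTRRfix16K_A0000 maps to seg = 1,
 * unit = 1.  The unit size is 8 << 14 = 0x20000 (128K), so
 *   *start = 0x80000 + 1 * 0x20000 = 0xa0000
 *   *end   = 0xa0000 + 0x20000     = 0xc0000
 * i.e. the MSR's eight type bytes cover the 16K ranges from 0xa0000 up
 * to 0xc0000.
 */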

static int fixed_mtrr_seg_unit_range_index(int seg, int unit)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];

	WARN_ON(mtrr_seg->start + unit * fixed_mtrr_seg_unit_size(seg)
		> mtrr_seg->end);

	/* each unit has 8 ranges. */
	return mtrr_seg->range_start + 8 * unit;
}

static int fixed_mtrr_seg_end_range_index(int seg)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	int n;

	n = (mtrr_seg->end - mtrr_seg->start) >> mtrr_seg->range_shift;
	return mtrr_seg->range_start + n - 1;
}

static bool fixed_msr_to_range(u32 msr, u64 *start, u64 *end)
{
	int seg, unit;

	if (!fixed_msr_to_seg_unit(msr, &seg, &unit))
		return false;

	fixed_mtrr_seg_unit_range(seg, unit, start, end);
	return true;
}

static int fixed_msr_to_range_index(u32 msr)
{
	int seg, unit;

	if (!fixed_msr_to_seg_unit(msr, &seg, &unit))
		return -1;

	return fixed_mtrr_seg_unit_range_index(seg, unit);
}

static int fixed_mtrr_addr_to_seg(u64 addr)
{
	struct fixed_mtrr_segment *mtrr_seg;
	int seg, seg_num = ARRAY_SIZE(fixed_seg_table);

	for (seg = 0; seg < seg_num; seg++) {
		mtrr_seg = &fixed_seg_table[seg];
		if (mtrr_seg->start <= addr && addr < mtrr_seg->end)
			return seg;
	}

	return -1;
}

static int fixed_mtrr_addr_seg_to_range_index(u64 addr, int seg)
{
	struct fixed_mtrr_segment *mtrr_seg;
	int index;

	mtrr_seg = &fixed_seg_table[seg];
	index = mtrr_seg->range_start;
	index += (addr - mtrr_seg->start) >> mtrr_seg->range_shift;
	return index;
}

static u64 fixed_mtrr_range_end_addr(int seg, int index)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	int pos = index - mtrr_seg->range_start;

	return mtrr_seg->start + ((pos + 1) << mtrr_seg->range_shift);
}
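
/*
 * Worked example (editor's note): addr = 0xcd000 lies in seg 2, so
 *   index = 24 + ((0xcd000 - 0xc0000) >> 12) = 24 + 13 = 37
 * and fixed_mtrr_range_end_addr(2, 37) = 0xc0000 + (14 << 12) = 0xce000,
 * i.e. range 37 covers [0xcd000, 0xce000).
 */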

static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end)
{
	u64 mask;

	*start = range->base & PAGE_MASK;

	mask = range->mask & PAGE_MASK;

	/* This cannot overflow because writing to the reserved bits of
	 * variable MTRRs causes a #GP.
	 */
	*end = (*start | ~mask) + 1;
}
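
/*
 * Worked example (editor's note): assume maxphyaddr == 36 and a variable
 * MTRR programmed as base = 0xc0000006 (base 0xc0000000, type WB) with a
 * stored mask of 0xffffffffc0000800 (a 1GB PhysMask, already extended by
 * set_var_mtrr_msr() below).  Then:
 *   *start = 0xc0000006 & PAGE_MASK          = 0xc0000000
 *   mask   = 0xffffffffc0000800 & PAGE_MASK  = 0xffffffffc0000000
 *   *end   = (0xc0000000 | 0x3fffffff) + 1   = 0x100000000
 * so the range describes the 1GB region [0xc0000000, 0x100000000).
 */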

static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	gfn_t start, end;
	int index;

	if (msr == MSR_IA32_CR_PAT || !tdp_enabled ||
	    !kvm_arch_has_noncoherent_dma(vcpu->kvm))
		return;

	if (!mtrr_is_enabled(mtrr_state) && msr != MSR_MTRRdefType)
		return;

	/* fixed MTRRs. */
	if (fixed_msr_to_range(msr, &start, &end)) {
		if (!fixed_mtrr_is_enabled(mtrr_state))
			return;
	} else if (msr == MSR_MTRRdefType) {
		start = 0x0;
		end = ~0ULL;
	} else {
		/* variable range MTRRs. */
		index = (msr - 0x200) / 2;
		var_mtrr_range(&mtrr_state->var_ranges[index], &start, &end);
	}

	kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end));
}

static bool var_mtrr_range_is_valid(struct kvm_mtrr_range *range)
{
	return (range->mask & (1 << 11)) != 0;
}

static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct kvm_mtrr_range *tmp, *cur;
	int index, is_mtrr_mask;

	index = (msr - 0x200) / 2;
	is_mtrr_mask = msr - 0x200 - 2 * index;
	cur = &mtrr_state->var_ranges[index];

	/* remove the entry if it's in the list. */
	if (var_mtrr_range_is_valid(cur))
		list_del(&mtrr_state->var_ranges[index].node);

	/* Extend the mask with all 1 bits to the left, since those
	 * bits must implicitly be 0.  The bits are then cleared
	 * when reading them.
	 */
	if (!is_mtrr_mask)
		cur->base = data;
	else
		cur->mask = data | (-1LL << cpuid_maxphyaddr(vcpu));

	/* add it to the list if it's enabled. */
	if (var_mtrr_range_is_valid(cur)) {
		list_for_each_entry(tmp, &mtrr_state->head, node)
			if (cur->base >= tmp->base)
				break;
		list_add_tail(&cur->node, &tmp->node);
	}
}
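
/*
 * Worked example (editor's note): with maxphyaddr == 36, a guest write of
 * 0xfc0000800 to MTRRphysMask0 is stored as
 *   0xfc0000800 | (-1LL << 36) = 0xffffffffc0000800
 * so ~mask in var_mtrr_range() has no stray high bits.  On the read side,
 * kvm_mtrr_get_msr() masks with (1ULL << 36) - 1 and returns the original
 * 0xfc0000800 to the guest.
 */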

int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int index;

	if (!kvm_mtrr_valid(vcpu, msr, data))
		return 1;

	index = fixed_msr_to_range_index(msr);
	if (index >= 0)
		*(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index] = data;
	else if (msr == MSR_MTRRdefType)
		vcpu->arch.mtrr_state.deftype = data;
	else if (msr == MSR_IA32_CR_PAT)
		vcpu->arch.pat = data;
	else
		set_var_mtrr_msr(vcpu, msr, data);

	update_mtrr(vcpu, msr);
	return 0;
}

int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	int index;

	/* MSR_MTRRcap is a readonly MSR. */
	if (msr == MSR_MTRRcap) {
		/*
		 * SMRR = 0
		 * WC = 1
		 * FIX = 1
		 * VCNT = KVM_NR_VAR_MTRR
		 */
		*pdata = 0x500 | KVM_NR_VAR_MTRR;
		return 0;
	}

	if (!msr_mtrr_valid(msr))
		return 1;

	index = fixed_msr_to_range_index(msr);
	if (index >= 0)
		*pdata = *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index];
	else if (msr == MSR_MTRRdefType)
		*pdata = vcpu->arch.mtrr_state.deftype;
	else if (msr == MSR_IA32_CR_PAT)
		*pdata = vcpu->arch.pat;
	else {	/* Variable MTRRs */
		int is_mtrr_mask;

		index = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * index;
		if (!is_mtrr_mask)
			*pdata = vcpu->arch.mtrr_state.var_ranges[index].base;
		else
			*pdata = vcpu->arch.mtrr_state.var_ranges[index].mask;

		*pdata &= (1ULL << cpuid_maxphyaddr(vcpu)) - 1;
	}

	return 0;
}
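
/*
 * Worked example (editor's note): in MSR_MTRRcap, bits 7:0 are VCNT,
 * bit 8 is FIX, bit 10 is WC and bit 11 is SMRR.  0x500 sets FIX and WC,
 * so with KVM_NR_VAR_MTRR == 8 the guest reads 0x508: 8 variable MTRRs,
 * fixed MTRRs supported, write-combining supported, no SMRR.
 */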

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu)
{
	INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);
}

struct mtrr_iter {
	/* input fields. */
	struct kvm_mtrr *mtrr_state;
	u64 start;
	u64 end;

	/* output fields. */
	int mem_type;
	/* mtrr is completely disabled? */
	bool mtrr_disabled;
	/* [start, end) is not fully covered in MTRRs? */
	bool partial_map;

	/* private fields. */
	union {
		/* used for fixed MTRRs. */
		struct {
			int index;
			int seg;
		};
		/* used for var MTRRs. */
		struct {
			struct kvm_mtrr_range *range;
			/* max address covered so far by the var MTRRs. */
			u64 start_max;
		};
	};

	bool fixed;
};

static bool mtrr_lookup_fixed_start(struct mtrr_iter *iter)
{
	int seg, index;

	if (!fixed_mtrr_is_enabled(iter->mtrr_state))
		return false;

	seg = fixed_mtrr_addr_to_seg(iter->start);
	if (seg < 0)
		return false;

	iter->fixed = true;
	index = fixed_mtrr_addr_seg_to_range_index(iter->start, seg);
	iter->index = index;
	iter->seg = seg;
	return true;
}

static bool match_var_range(struct mtrr_iter *iter,
			    struct kvm_mtrr_range *range)
{
	u64 start, end;

	var_mtrr_range(range, &start, &end);
	if (!(start >= iter->end || end <= iter->start)) {
		iter->range = range;

		/*
		 * This function is called while walking kvm_mtrr.head;
		 * range has the lowest base address that overlaps
		 * [iter->start_max, iter->end).
		 */
		iter->partial_map |= iter->start_max < start;

		/* update the max address covered so far. */
		iter->start_max = max(iter->start_max, end);
		return true;
	}

	return false;
}

static void __mtrr_lookup_var_next(struct mtrr_iter *iter)
{
	struct kvm_mtrr *mtrr_state = iter->mtrr_state;

	list_for_each_entry_continue(iter->range, &mtrr_state->head, node)
		if (match_var_range(iter, iter->range))
			return;

	iter->range = NULL;
	iter->partial_map |= iter->start_max < iter->end;
}

static void mtrr_lookup_var_start(struct mtrr_iter *iter)
{
	struct kvm_mtrr *mtrr_state = iter->mtrr_state;

	iter->fixed = false;
	iter->start_max = iter->start;
	iter->range = list_prepare_entry(iter->range, &mtrr_state->head, node);

	__mtrr_lookup_var_next(iter);
}

static void mtrr_lookup_fixed_next(struct mtrr_iter *iter)
{
	/* terminate the lookup. */
	if (fixed_mtrr_range_end_addr(iter->seg, iter->index) >= iter->end) {
		iter->fixed = false;
		iter->range = NULL;
		return;
	}

	iter->index++;

	/* have looked up all fixed MTRRs. */
	if (iter->index >= ARRAY_SIZE(iter->mtrr_state->fixed_ranges))
		return mtrr_lookup_var_start(iter);

	/* switch to next segment. */
	if (iter->index > fixed_mtrr_seg_end_range_index(iter->seg))
		iter->seg++;
}

static void mtrr_lookup_var_next(struct mtrr_iter *iter)
{
	__mtrr_lookup_var_next(iter);
}

static void mtrr_lookup_start(struct mtrr_iter *iter)
{
	if (!mtrr_is_enabled(iter->mtrr_state)) {
		iter->mtrr_disabled = true;
		return;
	}

	if (!mtrr_lookup_fixed_start(iter))
		mtrr_lookup_var_start(iter);
}

static void mtrr_lookup_init(struct mtrr_iter *iter,
			     struct kvm_mtrr *mtrr_state, u64 start, u64 end)
{
	iter->mtrr_state = mtrr_state;
	iter->start = start;
	iter->end = end;
	iter->mtrr_disabled = false;
	iter->partial_map = false;
	iter->fixed = false;
	iter->range = NULL;

	mtrr_lookup_start(iter);
}

static bool mtrr_lookup_okay(struct mtrr_iter *iter)
{
	if (iter->fixed) {
		iter->mem_type = iter->mtrr_state->fixed_ranges[iter->index];
		return true;
	}

	if (iter->range) {
		iter->mem_type = iter->range->base & 0xff;
		return true;
	}

	return false;
}

static void mtrr_lookup_next(struct mtrr_iter *iter)
{
	if (iter->fixed)
		mtrr_lookup_fixed_next(iter);
	else
		mtrr_lookup_var_next(iter);
}

#define mtrr_for_each_mem_type(_iter_, _mtrr_, _gpa_start_, _gpa_end_) \
	for (mtrr_lookup_init(_iter_, _mtrr_, _gpa_start_, _gpa_end_); \
	     mtrr_lookup_okay(_iter_); mtrr_lookup_next(_iter_))
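
/*
 * Illustrative usage sketch (editor's note, not part of the original
 * file): the macro above behaves like a for loop that yields one memory
 * type per matching MTRR.  The helper name below is hypothetical.
 */
static u8 __maybe_unused mtrr_first_mem_type_sketch(struct kvm_mtrr *mtrr_state,
						    u64 start, u64 end)
{
	struct mtrr_iter iter;

	/* return the type of the first MTRR that covers [start, end). */
	mtrr_for_each_mem_type(&iter, mtrr_state, start, end)
		return iter.mem_type;

	/* no MTRR matched (or MTRRs are disabled); assume WB here. */
	return MTRR_TYPE_WRBACK;
}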

u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct mtrr_iter iter;
	u64 start, end;
	int type = -1;
	const int wt_wb_mask = (1 << MTRR_TYPE_WRBACK)
			       | (1 << MTRR_TYPE_WRTHROUGH);

	start = gfn_to_gpa(gfn);
	end = start + PAGE_SIZE;

	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
		int curr_type = iter.mem_type;

		/*
		 * Please refer to Intel SDM Volume 3: 11.11.4.1 MTRR
		 * Precedences.
		 */

		if (type == -1) {
			type = curr_type;
			continue;
		}

		/*
		 * If two or more variable memory ranges match and the
		 * memory types are identical, then that memory type is
		 * used.
		 */
		if (type == curr_type)
			continue;

		/*
		 * If two or more variable memory ranges match and one of
		 * the memory types is UC, the UC memory type is used.
		 */
		if (curr_type == MTRR_TYPE_UNCACHABLE)
			return MTRR_TYPE_UNCACHABLE;

		/*
		 * If two or more variable memory ranges match and the
		 * memory types are WT and WB, the WT memory type is used.
		 */
		if (((1 << type) & wt_wb_mask) &&
		    ((1 << curr_type) & wt_wb_mask)) {
			type = MTRR_TYPE_WRTHROUGH;
			continue;
		}

		/*
		 * For overlaps not defined by the above rules, processor
		 * behavior is undefined.
		 */

		/* We use WB for this undefined behavior. :( */
		return MTRR_TYPE_WRBACK;
	}

	if (iter.mtrr_disabled)
		return mtrr_disabled_type(vcpu);

	/* not contained in any MTRRs. */
	if (type == -1)
		return mtrr_default_type(mtrr_state);

	/*
	 * We only checked one page; it cannot be partially covered
	 * by the MTRRs.
	 */
	WARN_ON(iter.partial_map);

	return type;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
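
/*
 * Worked example (editor's note): if the page at gfn is covered by two
 * variable ranges of types WB and WT, both types are in wt_wb_mask and
 * the result is WT.  If either range were UC, the result would be UC.
 * Any other conflicting pair (e.g. WC and WB) is undefined by the SDM,
 * and this implementation falls back to WB.
 */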

bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct mtrr_iter iter;
	u64 start, end;
	int type = -1;

	start = gfn_to_gpa(gfn);
	end = gfn_to_gpa(gfn + page_num);
	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
		if (type == -1) {
			type = iter.mem_type;
			continue;
		}

		if (type != iter.mem_type)
			return false;
	}

	if (iter.mtrr_disabled)
		return true;

	if (!iter.partial_map)
		return true;

	if (type == -1)
		return true;

	return type == mtrr_default_type(mtrr_state);
}
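
/*
 * Worked example (editor's note): for a candidate 2MB mapping, page_num
 * would be 512.  If those 512 pages span MTRRs of different types (say
 * WB followed by UC for an MMIO hole), the loop sees two different
 * mem_types and returns false, so the caller can fall back to 4K
 * mappings.  If every matching MTRR reports the same type (and any
 * uncovered tail matches the default type), the large mapping is safe.
 */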