@@ -238,6 +238,17 @@ static const u64 shadow_acc_track_saved_bits_mask = PT64_EPT_READABLE_MASK |
 						    PT64_EPT_EXECUTABLE_MASK;
 static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIFT;
 
+/*
+ * This mask must be set on all non-zero Non-Present or Reserved SPTEs in order
+ * to guard against L1TF attacks.
+ */
+static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
+
+/*
+ * The number of high-order 1 bits to use in the mask above.
+ */
+static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;
+
 static void mmu_spte_set(u64 *sptep, u64 spte);
 static union kvm_mmu_page_role
 kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
@@ -327,9 +338,13 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
 {
 	unsigned int gen = kvm_current_mmio_generation(vcpu);
 	u64 mask = generation_mmio_spte_mask(gen);
+	u64 gpa = gfn << PAGE_SHIFT;
 
 	access &= ACC_WRITE_MASK | ACC_USER_MASK;
-	mask |= shadow_mmio_value | access | gfn << PAGE_SHIFT;
+	mask |= shadow_mmio_value | access;
+	mask |= gpa | shadow_nonpresent_or_rsvd_mask;
+	mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
+		<< shadow_nonpresent_or_rsvd_mask_len;
 
 	trace_mark_mmio_spte(sptep, gfn, access, gen);
 	mmu_spte_set(sptep, mask);
@@ -342,8 +357,14 @@ static bool is_mmio_spte(u64 spte)
 
 static gfn_t get_mmio_spte_gfn(u64 spte)
 {
-	u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask;
-	return (spte & ~mask) >> PAGE_SHIFT;
+	u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask |
+		   shadow_nonpresent_or_rsvd_mask;
+	u64 gpa = spte & ~mask;
+
+	gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
+	       & shadow_nonpresent_or_rsvd_mask;
+
+	return gpa >> PAGE_SHIFT;
 }
 
 static unsigned get_mmio_spte_access(u64 spte)
@@ -400,7 +421,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
-static void kvm_mmu_clear_all_pte_masks(void)
+static void kvm_mmu_reset_all_pte_masks(void)
 {
 	shadow_user_mask = 0;
 	shadow_accessed_mask = 0;
@@ -410,6 +431,18 @@ static void kvm_mmu_clear_all_pte_masks(void)
 	shadow_mmio_mask = 0;
 	shadow_present_mask = 0;
 	shadow_acc_track_mask = 0;
+
+	/*
+	 * If the CPU has 46 or less physical address bits, then set an
+	 * appropriate mask to guard against L1TF attacks. Otherwise, it is
+	 * assumed that the CPU is not vulnerable to L1TF.
+	 */
+	if (boot_cpu_data.x86_phys_bits <
+	    52 - shadow_nonpresent_or_rsvd_mask_len)
+		shadow_nonpresent_or_rsvd_mask =
+			rsvd_bits(boot_cpu_data.x86_phys_bits -
+				  shadow_nonpresent_or_rsvd_mask_len,
+				  boot_cpu_data.x86_phys_bits - 1);
 }
 
 static int is_cpuid_PSE36(void)
@@ -5819,7 +5852,7 @@ int kvm_mmu_module_init(void)
 {
 	int ret = -ENOMEM;
 
-	kvm_mmu_clear_all_pte_masks();
+	kvm_mmu_reset_all_pte_masks();
 
 	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
 						sizeof(struct pte_list_desc),
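
For illustration only, and not part of the patch above: the standalone user-space sketch below walks through the same bit manipulation the patch applies to MMIO SPTEs, i.e. forcing the high reserved/non-present address bits to 1 (the L1TF guard) while stashing the GPA bits those ones overwrite shadow_nonpresent_or_rsvd_mask_len positions higher so the GFN can still be recovered. The 36-bit physical address width, the example GFN, and the local names (rsvd_mask, mask_len) are assumptions made for the sketch; the decode here also clears the stashed copy explicitly and leaves out the MMIO, access and generation masks that the kernel code folds in.

/*
 * Sketch of the SPTE encode/decode round trip used for the L1TF guard.
 * Assumed values: 36 physical address bits, mask length 5, arbitrary gfn.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define PAGE_SHIFT 12

/* Same shape as the kernel's rsvd_bits() helper: set bits s..e. */
static uint64_t rsvd_bits(int s, int e)
{
	return ((1ULL << (e - s + 1)) - 1) << s;
}

int main(void)
{
	const int phys_bits = 36;	/* assumed CPU physical address width */
	const int mask_len = 5;		/* shadow_nonpresent_or_rsvd_mask_len */
	uint64_t rsvd_mask = 0;

	/*
	 * Mirrors the check in kvm_mmu_reset_all_pte_masks(): only CPUs with
	 * few enough physical address bits get a non-zero mask.
	 */
	if (phys_bits < 52 - mask_len)
		rsvd_mask = rsvd_bits(phys_bits - mask_len, phys_bits - 1);

	uint64_t gfn = 0xabcdef;	/* example MMIO gfn */
	uint64_t gpa = gfn << PAGE_SHIFT;

	/*
	 * Encode, as in mark_mmio_spte(): force the reserved bits to 1 and
	 * stash the GPA bits they cover mask_len positions higher.
	 */
	uint64_t spte = gpa | rsvd_mask;
	spte |= (gpa & rsvd_mask) << mask_len;

	/*
	 * Decode, a simplified get_mmio_spte_gfn(): drop the reserved bits
	 * and the stashed copy, then pull the stashed bits back down.
	 */
	uint64_t out = spte & ~(rsvd_mask | (rsvd_mask << mask_len));
	out |= (spte >> mask_len) & rsvd_mask;

	printf("rsvd mask : 0x%" PRIx64 "\n", rsvd_mask);
	printf("spte      : 0x%" PRIx64 "\n", spte);
	printf("gfn in    : 0x%" PRIx64 "\n", gfn);
	printf("gfn out   : 0x%" PRIx64 "\n", out >> PAGE_SHIFT);
	return 0;
}

With the values assumed above, "gfn out" matches "gfn in" even though the SPTE itself has all of bits 31-35 set, which is the point of the scheme: a non-present SPTE always carries address bits outside the CPU's physical address range, while the real GFN remains recoverable.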