@@ -1299,6 +1299,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 
 out_unlock:
 	spin_unlock(&kvm->mmu_lock);
+	kvm_set_pfn_accessed(pfn);
 	kvm_release_pfn_clean(pfn);
 	return ret;
 }
@@ -1333,7 +1334,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 	/* Check the stage-2 fault is trans. fault or write fault */
 	fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
-	if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
+	if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
+	    fault_status != FSC_ACCESS) {
 		kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
 			kvm_vcpu_trap_get_class(vcpu),
 			(unsigned long)kvm_vcpu_trap_get_fault(vcpu),
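
[Note: with FSC_ACCESS no longer rejected above, the abort path needs a cheap way to service an access-flag fault: look up the stage-2 entry for the faulting IPA, set its young bit, and mark the backing pfn accessed. The patch's actual FSC_ACCESS dispatch is outside this excerpt; what follows is only a sketch of such a handler, with a hypothetical name (handle_access_fault), built from helpers visible in this patch (stage2_get_pmd, kvm_pmd_huge, kvm_set_pfn_accessed) plus the standard pmd_mkyoung/pte_mkyoung/pmd_pfn/pte_pfn accessors.]

	static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
	{
		pmd_t *pmd;
		pte_t *pte;
		pfn_t pfn;
		bool pfn_valid = false;

		spin_lock(&vcpu->kvm->mmu_lock);

		pmd = stage2_get_pmd(vcpu->kvm, NULL, fault_ipa);
		if (!pmd || pmd_none(*pmd))	/* Nothing mapped there */
			goto out;

		if (kvm_pmd_huge(*pmd)) {	/* THP, HugeTLB */
			*pmd = pmd_mkyoung(*pmd);
			pfn = pmd_pfn(*pmd);
			pfn_valid = true;
			goto out;
		}

		pte = pte_offset_kernel(pmd, fault_ipa);
		if (pte_none(*pte))		/* Nothing there either */
			goto out;

		*pte = pte_mkyoung(*pte);	/* Just a page... */
		pfn = pte_pfn(*pte);
		pfn_valid = true;
	out:
		spin_unlock(&vcpu->kvm->mmu_lock);
		if (pfn_valid)
			kvm_set_pfn_accessed(pfn);
	}

Setting the young bit again means no further trap is taken for this page until the aging handlers below clear it; kvm_set_pfn_accessed() propagates the reference to the host's page reclaim.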
@@ -1475,6 +1477,67 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
 }
 
+static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+{
+	pmd_t *pmd;
+	pte_t *pte;
+
+	pmd = stage2_get_pmd(kvm, NULL, gpa);
+	if (!pmd || pmd_none(*pmd))	/* Nothing there */
+		return 0;
+
+	if (kvm_pmd_huge(*pmd)) {	/* THP, HugeTLB */
+		if (pmd_young(*pmd)) {
+			*pmd = pmd_mkold(*pmd);
+			return 1;
+		}
+
+		return 0;
+	}
+
+	pte = pte_offset_kernel(pmd, gpa);
+	if (pte_none(*pte))
+		return 0;
+
+	if (pte_young(*pte)) {
+		*pte = pte_mkold(*pte);	/* Just a page... */
+		return 1;
+	}
+
+	return 0;
+}
+
+static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+{
+	pmd_t *pmd;
+	pte_t *pte;
+
+	pmd = stage2_get_pmd(kvm, NULL, gpa);
+	if (!pmd || pmd_none(*pmd))	/* Nothing there */
+		return 0;
+
+	if (kvm_pmd_huge(*pmd))		/* THP, HugeTLB */
+		return pmd_young(*pmd);
+
+	pte = pte_offset_kernel(pmd, gpa);
+	if (!pte_none(*pte))		/* Just a page... */
+		return pte_young(*pte);
+
+	return 0;
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
+{
+	trace_kvm_age_hva(start, end);
+	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
+}
+
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+	trace_kvm_test_age_hva(hva);
+	return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
+}
+
 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
 	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
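
[Note: the new handlers clear and test the young bits without taking mmu_lock themselves; that only works because the generic MMU-notifier glue in virt/kvm/kvm_main.c (not part of this patch) already holds it around the arch hook. Roughly, and sketched from the generic code rather than quoted from this patch, the caller that ends up in kvm_age_hva_handler looks like:]

	static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
						      struct mm_struct *mm,
						      unsigned long start,
						      unsigned long end)
	{
		struct kvm *kvm = mmu_notifier_to_kvm(mn);
		int young, idx;

		idx = srcu_read_lock(&kvm->srcu);
		spin_lock(&kvm->mmu_lock);

		/* Dispatches to the per-arch hook added above */
		young = kvm_age_hva(kvm, start, end);
		if (young)
			kvm_flush_remote_tlbs(kvm);

		spin_unlock(&kvm->mmu_lock);
		srcu_read_unlock(&kvm->srcu, idx);

		return young;
	}

kvm_test_age_hva() is reached the same way from the test_young notifier, which only queries the young bit and therefore needs no TLB flush. The nonzero return from the handlers is what tells the host's reclaim logic that the guest has referenced the page.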