@@ -107,10 +107,10 @@ EXPORT_SYMBOL_GPL(kvm_rebooting);
 
 static bool largepages_enabled = true;
 
-bool kvm_is_mmio_pfn(pfn_t pfn)
+bool kvm_is_reserved_pfn(pfn_t pfn)
 {
 	if (pfn_valid(pfn))
-		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
+		return PageReserved(pfn_to_page(pfn));
 
 	return true;
 }
@@ -1321,7 +1321,7 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
 	else if ((vma->vm_flags & VM_PFNMAP)) {
 		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
 			vma->vm_pgoff;
-		BUG_ON(!kvm_is_mmio_pfn(pfn));
+		BUG_ON(!kvm_is_reserved_pfn(pfn));
 	} else {
 		if (async && vma_is_valid(vma, write_fault))
 			*async = true;
@@ -1427,7 +1427,7 @@ static struct page *kvm_pfn_to_page(pfn_t pfn)
 	if (is_error_noslot_pfn(pfn))
 		return KVM_ERR_PTR_BAD_PAGE;
 
-	if (kvm_is_mmio_pfn(pfn)) {
+	if (kvm_is_reserved_pfn(pfn)) {
 		WARN_ON(1);
 		return KVM_ERR_PTR_BAD_PAGE;
 	}
@@ -1456,7 +1456,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 
 void kvm_release_pfn_clean(pfn_t pfn)
 {
-	if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
+	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
 		put_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
@@ -1477,7 +1477,7 @@ static void kvm_release_pfn_dirty(pfn_t pfn)
 
 void kvm_set_pfn_dirty(pfn_t pfn)
 {
-	if (!kvm_is_mmio_pfn(pfn)) {
+	if (!kvm_is_reserved_pfn(pfn)) {
 		struct page *page = pfn_to_page(pfn);
 		if (!PageReserved(page))
 			SetPageDirty(page);
@@ -1487,14 +1487,14 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
 
 void kvm_set_pfn_accessed(pfn_t pfn)
 {
-	if (!kvm_is_mmio_pfn(pfn))
+	if (!kvm_is_reserved_pfn(pfn))
 		mark_page_accessed(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
 
 void kvm_get_pfn(pfn_t pfn)
 {
-	if (!kvm_is_mmio_pfn(pfn))
+	if (!kvm_is_reserved_pfn(pfn))
 		get_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_get_pfn);