|
@@ -927,9 +927,10 @@ int track_pfn_copy(struct vm_area_struct *vma)
|
|
|
}
|
|
|
|
|
|
/*
|
|
|
- * prot is passed in as a parameter for the new mapping. If the vma has a
|
|
|
- * linear pfn mapping for the entire range reserve the entire vma range with
|
|
|
- * single reserve_pfn_range call.
|
|
|
+ * prot is passed in as a parameter for the new mapping. If the vma has
|
|
|
+ * a linear pfn mapping for the entire range, or no vma is provided,
|
|
|
+ * reserve the entire pfn + size range with a single reserve_pfn_range()
|
|
|
+ * call.
|
|
|
*/
|
|
|
int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
|
|
|
unsigned long pfn, unsigned long addr, unsigned long size)
|
|
@@ -938,11 +939,12 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
|
|
|
enum page_cache_mode pcm;
|
|
|
|
|
|
/* reserve the whole chunk starting from paddr */
|
|
|
- if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) {
|
|
|
+ if (!vma || (addr == vma->vm_start
|
|
|
+ && size == (vma->vm_end - vma->vm_start))) {
|
|
|
int ret;
|
|
|
|
|
|
ret = reserve_pfn_range(paddr, size, prot, 0);
|
|
|
- if (!ret)
|
|
|
+ if (ret == 0 && vma)
|
|
|
vma->vm_flags |= VM_PAT;
|
|
|
return ret;
|
|
|
}
|
|
@@ -997,7 +999,7 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
|
|
|
resource_size_t paddr;
|
|
|
unsigned long prot;
|
|
|
|
|
|
- if (!(vma->vm_flags & VM_PAT))
|
|
|
+ if (vma && !(vma->vm_flags & VM_PAT))
|
|
|
return;
|
|
|
|
|
|
/* free the chunk starting from pfn or the whole chunk */
|
|
@@ -1011,7 +1013,8 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
|
|
|
size = vma->vm_end - vma->vm_start;
|
|
|
}
|
|
|
free_pfn_range(paddr, size);
|
|
|
- vma->vm_flags &= ~VM_PAT;
|
|
|
+ if (vma)
|
|
|
+ vma->vm_flags &= ~VM_PAT;
|
|
|
}
|
|
|
|
|
|
/*
|