@@ -386,18 +386,19 @@ struct vm_operations_struct {
 	void (*close)(struct vm_area_struct * area);
 	int (*split)(struct vm_area_struct * area, unsigned long addr);
 	int (*mremap)(struct vm_area_struct * area);
-	int (*fault)(struct vm_fault *vmf);
-	int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size);
+	vm_fault_t (*fault)(struct vm_fault *vmf);
+	vm_fault_t (*huge_fault)(struct vm_fault *vmf,
+			enum page_entry_size pe_size);
 	void (*map_pages)(struct vm_fault *vmf,
 			pgoff_t start_pgoff, pgoff_t end_pgoff);
 	unsigned long (*pagesize)(struct vm_area_struct * area);
 
 	/* notification that a previously read-only page is about to become
 	 * writable, if an error is returned it will cause a SIGBUS */
-	int (*page_mkwrite)(struct vm_fault *vmf);
+	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);
 
 	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
-	int (*pfn_mkwrite)(struct vm_fault *vmf);
+	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
 
 	/* called by access_process_vm when get_user_pages() fails, typically
 	 * for use by special VMAs that can switch between memory and hardware
@@ -2424,6 +2425,44 @@ int vm_insert_mixed_mkwrite(struct vm_area_struct *vma, unsigned long addr,
 			pfn_t pfn);
 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
 
+static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
+				unsigned long addr, struct page *page)
+{
+	int err = vm_insert_page(vma, addr, page);
+
+	if (err == -ENOMEM)
+		return VM_FAULT_OOM;
+	if (err < 0 && err != -EBUSY)
+		return VM_FAULT_SIGBUS;
+
+	return VM_FAULT_NOPAGE;
+}
+
+static inline vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma,
+				unsigned long addr, pfn_t pfn)
+{
+	int err = vm_insert_mixed(vma, addr, pfn);
+
+	if (err == -ENOMEM)
+		return VM_FAULT_OOM;
+	if (err < 0 && err != -EBUSY)
+		return VM_FAULT_SIGBUS;
+
+	return VM_FAULT_NOPAGE;
+}
+
+static inline vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma,
+				unsigned long addr, unsigned long pfn)
+{
+	int err = vm_insert_pfn(vma, addr, pfn);
+
+	if (err == -ENOMEM)
+		return VM_FAULT_OOM;
+	if (err < 0 && err != -EBUSY)
+		return VM_FAULT_SIGBUS;
+
+	return VM_FAULT_NOPAGE;
+}
+
 struct page *follow_page_mask(struct vm_area_struct *vma,
 			      unsigned long address, unsigned int foll_flags,
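
As a minimal sketch of how a driver-side ->fault handler might look once these helpers exist: the struct my_dev type, its ->page field, and my_dev_fault() below are illustrative assumptions, not part of the patch; only vm_fault_t, struct vm_fault, vmf_insert_page() and vm_operations_struct come from the code above.

/*
 * Illustrative sketch only: a hypothetical driver that maps a single
 * backing page into userspace. my_dev and its ->page field are made up.
 */
struct my_dev {
	struct page *page;	/* hypothetical backing page */
};

static vm_fault_t my_dev_fault(struct vm_fault *vmf)
{
	struct my_dev *dev = vmf->vma->vm_private_data;

	/*
	 * vmf_insert_page() already translates vm_insert_page() errors
	 * into VM_FAULT_* codes, so its result can be returned directly.
	 */
	return vmf_insert_page(vmf->vma, vmf->address, dev->page);
}

static const struct vm_operations_struct my_dev_vm_ops = {
	.fault = my_dev_fault,
};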