|
@@ -345,6 +345,44 @@ const struct mem_type *get_mem_type(unsigned int type)
|
|
}
|
|
}
|
|
EXPORT_SYMBOL(get_mem_type);
|
|
EXPORT_SYMBOL(get_mem_type);
|
|
|
|
|
|
|
|
+/*
+ * PTE_SET_FN(name, pteop): generate pte_set_<name>(), an apply_to_page_range()
+ * callback that rewrites one PTE with the given pte_* modifier and installs it
+ * via set_pte_ext().  Always returns 0 so the page-range walk continues.
+ */
+#define PTE_SET_FN(_name, pteop) \
+static int pte_set_##_name(pte_t *ptep, pgtable_t token, unsigned long addr, \
+			void *data) \
+{ \
+	pte_t pte = pteop(*ptep); \
+\
+	set_pte_ext(ptep, pte, 0); \
+	return 0; \
+} \
+
|
|
|
|
+/*
+ * SET_MEMORY_FN(name, callback): generate set_memory_<name>(addr, numpages),
+ * which applies 'callback' to every PTE in [addr, addr + numpages*PAGE_SIZE)
+ * and then flushes the kernel TLB for that range.  Only addresses inside the
+ * module area (MODULES_VADDR..MODULES_END) are accepted; anything else gets
+ * -EINVAL.
+ *
+ * NOTE(review): 'end' is exclusive, so rejecting end == MODULES_END looks
+ * off-by-one — confirm whether a range ending exactly at MODULES_END should
+ * be allowed.
+ */
+#define SET_MEMORY_FN(_name, callback) \
+int set_memory_##_name(unsigned long addr, int numpages) \
+{ \
+	unsigned long start = addr; \
+	unsigned long size = PAGE_SIZE*numpages; \
+	/* was 'unsigned': must match start/size to avoid truncation */ \
+	unsigned long end = start + size; \
+\
+	if (start < MODULES_VADDR || start >= MODULES_END) \
+		return -EINVAL;\
+\
+	if (end < MODULES_VADDR || end >= MODULES_END) \
+		return -EINVAL; \
+\
+	apply_to_page_range(&init_mm, start, size, callback, NULL); \
+	flush_tlb_kernel_range(start, end); \
+	return 0;\
+}
+
|
|
|
|
+/* Per-permission PTE callbacks and their public set_memory_*() wrappers. */
+PTE_SET_FN(ro, pte_wrprotect)
+PTE_SET_FN(rw, pte_mkwrite)
+PTE_SET_FN(x, pte_mkexec)
+PTE_SET_FN(nx, pte_mknexec)
+
+SET_MEMORY_FN(ro, pte_set_ro)
+SET_MEMORY_FN(rw, pte_set_rw)
+SET_MEMORY_FN(x, pte_set_x)
+SET_MEMORY_FN(nx, pte_set_nx)
+
|
|
/*
 * Adjust the PMD section entries according to the CPU in use.
 */