@@ -551,7 +551,6 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
 {
 	void *page_addr;
 	unsigned long user_page_addr;
-	struct vm_struct tmp_area;
 	struct page **page;
 	struct mm_struct *mm;
|
@@ -600,10 +599,11 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
 			       proc->pid, page_addr);
 			goto err_alloc_page_failed;
 		}
-		tmp_area.addr = page_addr;
-		tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
-		ret = map_vm_area(&tmp_area, PAGE_KERNEL, page);
-		if (ret) {
+		ret = map_kernel_range_noflush((unsigned long)page_addr,
+					       PAGE_SIZE, PAGE_KERNEL, page);
+		flush_cache_vmap((unsigned long)page_addr,
+				 (unsigned long)page_addr + PAGE_SIZE);
+		if (ret != 1) {
 			pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
 			       proc->pid, page_addr);
 			goto err_map_kernel_failed;