@@ -913,6 +913,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 	struct binder_alloc *alloc;
 	uintptr_t page_addr;
 	size_t index;
+	struct vm_area_struct *vma;
 
 	alloc = page->alloc;
 	if (!mutex_trylock(&alloc->mutex))
@@ -923,16 +924,22 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
 	index = page - alloc->pages;
 	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
-	if (alloc->vma) {
+	vma = alloc->vma;
+	if (vma) {
 		mm = get_task_mm(alloc->tsk);
 		if (!mm)
 			goto err_get_task_mm_failed;
 		if (!down_write_trylock(&mm->mmap_sem))
 			goto err_down_write_mmap_sem_failed;
+	}
+
+	list_lru_isolate(lru, item);
+	spin_unlock(lock);
 
+	if (vma) {
 		trace_binder_unmap_user_start(alloc, index);
 
-		zap_page_range(alloc->vma,
+		zap_page_range(vma,
 			       page_addr + alloc->user_buffer_offset,
 			       PAGE_SIZE);
 
@@ -950,13 +957,12 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
 	trace_binder_unmap_kernel_end(alloc, index);
 
-	list_lru_isolate(lru, item);
-
+	spin_lock(lock);
 	mutex_unlock(&alloc->mutex);
-	return LRU_REMOVED;
+	return LRU_REMOVED_RETRY;
 
 err_down_write_mmap_sem_failed:
-	mmput(mm);
+	mmput_async(mm);
 err_get_task_mm_failed:
 err_page_already_freed:
 	mutex_unlock(&alloc->mutex);
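
For context, the shape this patch moves to is the standard one for doing blocking work inside a list_lru isolate callback: isolate the item while the lru lock is still held, drop the lock, do the work that may sleep, re-take the lock, and return LRU_REMOVED_RETRY so the walker restarts rather than trusting a now-stale cursor. Below is a minimal sketch of that pattern, assuming the same-era list_lru callback signature used above; struct demo_page and demo_reclaim_page() are hypothetical stand-ins, not binder code.

#include <linux/kernel.h>
#include <linux/list_lru.h>
#include <linux/spinlock.h>

struct demo_page {
	struct list_head lru;		/* linked into a struct list_lru */
};

/* Hypothetical blocking step (stands in for zap_page_range() etc.). */
static void demo_reclaim_page(struct demo_page *page)
{
	/* ... may sleep; must not run under the lru spinlock ... */
}

static enum lru_status demo_free_page(struct list_head *item,
				      struct list_lru_one *lru,
				      spinlock_t *lock,
				      void *cb_arg)
{
	struct demo_page *page = container_of(item, struct demo_page, lru);

	/*
	 * Unlink the item while the lru lock is still held, so a
	 * concurrent walker cannot find and re-isolate it once the
	 * lock is dropped.
	 */
	list_lru_isolate(lru, item);
	spin_unlock(lock);

	demo_reclaim_page(page);	/* blocking work, lock dropped */

	/*
	 * list_lru_walk() expects the lock to be held on return;
	 * LRU_REMOVED_RETRY signals that the lock was dropped, so the
	 * walk must restart instead of continuing from its cursor.
	 */
	spin_lock(lock);
	return LRU_REMOVED_RETRY;
}

The same lock-ordering concern explains the mmput() -> mmput_async() change in the error path: that goto is taken while the lru spinlock is still held, an atomic context where a final mmput() could sleep, so the mm drop is deferred to a workqueue instead.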