@@ -332,6 +332,35 @@ err_no_vma:
 	return vma ? -ENOMEM : -ESRCH;
 }
 
+static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
+		struct vm_area_struct *vma)
+{
+	if (vma)
+		alloc->vma_vm_mm = vma->vm_mm;
+	/*
+	 * If we see alloc->vma is non-NULL, the buffer data structures are
+	 * set up completely; see the pairing smp_rmb() in
+	 * binder_alloc_get_vma(). We also want to guarantee that the new
+	 * alloc->vma_vm_mm is visible whenever alloc->vma is set.
+	 */
+	smp_wmb();
+	alloc->vma = vma;
+}
+
+static inline struct vm_area_struct *binder_alloc_get_vma(
+		struct binder_alloc *alloc)
+{
+	struct vm_area_struct *vma = NULL;
+
+	if (alloc->vma) {
+		/* See the smp_wmb() pairing in binder_alloc_set_vma() */
+		smp_rmb();
+		vma = alloc->vma;
+	}
+	return vma;
+}
+
 static struct binder_buffer *binder_alloc_new_buf_locked(
 				struct binder_alloc *alloc,
 				size_t data_size,
@@ -348,7 +377,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 	size_t size, data_offsets_size;
 	int ret;
 
-	if (alloc->vma == NULL) {
+	if (!binder_alloc_get_vma(alloc)) {
 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
 				   "%d: binder_alloc_buf, no vma\n",
 				   alloc->pid);
@@ -723,9 +752,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 	buffer->free = 1;
 	binder_insert_free_buffer(alloc, buffer);
 	alloc->free_async_space = alloc->buffer_size / 2;
-	barrier();
-	alloc->vma = vma;
-	alloc->vma_vm_mm = vma->vm_mm;
+	binder_alloc_set_vma(alloc, vma);
 	mmgrab(alloc->vma_vm_mm);
 
 	return 0;
@@ -754,10 +781,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 	int buffers, page_count;
 	struct binder_buffer *buffer;
 
-	BUG_ON(alloc->vma);
-
 	buffers = 0;
 	mutex_lock(&alloc->mutex);
+	BUG_ON(alloc->vma);
+
 	while ((n = rb_first(&alloc->allocated_buffers))) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 
@@ -900,7 +927,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
  */
 void binder_alloc_vma_close(struct binder_alloc *alloc)
 {
-	WRITE_ONCE(alloc->vma, NULL);
+	binder_alloc_set_vma(alloc, NULL);
 }
 
 /**
@@ -935,7 +962,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
 	index = page - alloc->pages;
 	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
-	vma = alloc->vma;
+	vma = binder_alloc_get_vma(alloc);
 	if (vma) {
 		if (!mmget_not_zero(alloc->vma_vm_mm))
 			goto err_mmget;
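
The two helpers introduced above are a standard publish/consume barrier pairing: the writer makes the dependent field (alloc->vma_vm_mm) visible before publishing the pointer (alloc->vma), and the reader issues a read barrier after seeing the pointer non-NULL and before trusting the dependent field. For reference, a minimal userspace sketch of the same ordering follows, using C11 fences in place of the kernel's smp_wmb()/smp_rmb(); the struct and function names are illustrative only and are not part of this patch.

/*
 * Illustrative sketch only: C11 analogue of the smp_wmb()/smp_rmb()
 * pairing used by binder_alloc_set_vma()/binder_alloc_get_vma().
 * Names here are hypothetical, not kernel APIs.
 */
#include <stdatomic.h>
#include <stddef.h>

struct pub_state {
	int dependent;			/* plays the role of alloc->vma_vm_mm */
	_Atomic(void *) published;	/* plays the role of alloc->vma */
};

void publish(struct pub_state *s, void *obj, int dep)
{
	if (obj)
		s->dependent = dep;
	/*
	 * Order the dependent store before the pointer becomes visible,
	 * like the smp_wmb() before alloc->vma = vma.
	 */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&s->published, obj, memory_order_relaxed);
}

void *consume(struct pub_state *s)
{
	void *obj = NULL;

	if (atomic_load_explicit(&s->published, memory_order_relaxed)) {
		/*
		 * Pairs with the release fence in publish(), like the
		 * smp_rmb() in binder_alloc_get_vma(): after this fence,
		 * s->dependent is guaranteed to be the published value.
		 */
		atomic_thread_fence(memory_order_acquire);
		obj = atomic_load_explicit(&s->published,
					   memory_order_relaxed);
	}
	return obj;
}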