@@ -62,9 +62,9 @@ static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
 				       struct binder_buffer *buffer)
 {
 	if (list_is_last(&buffer->entry, &alloc->buffers))
-		return alloc->buffer +
-		       alloc->buffer_size - (void *)buffer->data;
-	return (size_t)binder_buffer_next(buffer) - (size_t)buffer->data;
+		return (u8 *)alloc->buffer +
+			alloc->buffer_size - (u8 *)buffer->data;
+	return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
 }
 
 static void binder_insert_free_buffer(struct binder_alloc *alloc,
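
With buffer descriptors moved off the shared area, a buffer's size is simply
the gap between its data pointer and the next buffer's data pointer (or the
end of the mapped area for the last buffer). A minimal userspace sketch of
that rule, with made-up names (struct desc, desc_size) rather than the
kernel's:

    #include <stddef.h>
    #include <stdio.h>

    struct desc {
        unsigned char *data;        /* start of payload in the area */
        struct desc *next;          /* NULL for the last descriptor */
    };

    static size_t desc_size(unsigned char *area, size_t area_size,
                            const struct desc *d)
    {
        if (!d->next)
            return (size_t)(area + area_size - d->data);
        return (size_t)(d->next->data - d->data);
    }

    int main(void)
    {
        unsigned char area[4096];
        struct desc b = { area + 128, NULL };
        struct desc a = { area, &b };

        /* a owns bytes [0, 128), b owns the remainder of the area */
        printf("a: %zu, b: %zu\n",
               desc_size(area, sizeof(area), &a),
               desc_size(area, sizeof(area), &b));
        return 0;
    }
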
@@ -114,9 +114,9 @@ static void binder_insert_allocated_buffer_locked(
 		buffer = rb_entry(parent, struct binder_buffer, rb_node);
 		BUG_ON(buffer->free);
 
-		if (new_buffer < buffer)
+		if (new_buffer->data < buffer->data)
 			p = &parent->rb_left;
-		else if (new_buffer > buffer)
+		else if (new_buffer->data > buffer->data)
 			p = &parent->rb_right;
 		else
 			BUG();
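
Since descriptors are now ordinary heap objects, their own addresses no
longer mirror their payloads' positions in the mapped area, so the rb-trees
have to key on ->data instead. A standalone model of the new ordering
(desc_cmp is a hypothetical helper, not a kernel function; both pointers are
assumed to point into the same area):

    struct desc {
        unsigned char *data;
    };

    /* Order descriptors by payload address, not descriptor address. */
    static int desc_cmp(const struct desc *a, const struct desc *b)
    {
        if (a->data < b->data)
            return -1;
        if (a->data > b->data)
            return 1;
        return 0;
    }

    int main(void)
    {
        unsigned char area[8];
        struct desc lo = { area }, hi = { area + 4 };

        return desc_cmp(&lo, &hi) < 0 ? 0 : 1;
    }
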
@@ -131,18 +131,17 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
 {
 	struct rb_node *n = alloc->allocated_buffers.rb_node;
 	struct binder_buffer *buffer;
-	struct binder_buffer *kern_ptr;
+	void *kern_ptr;
 
-	kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset
-		- offsetof(struct binder_buffer, data));
+	kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);
 
 	while (n) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 		BUG_ON(buffer->free);
 
-		if (kern_ptr < buffer)
+		if (kern_ptr < buffer->data)
 			n = n->rb_left;
-		else if (kern_ptr > buffer)
+		else if (kern_ptr > buffer->data)
 			n = n->rb_right;
 		else {
 			/*
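
Lookup simplifies for the same reason: user space used to be handed the
address of the data field of an in-area header, so recovering the kernel
pointer meant subtracting offsetof(struct binder_buffer, data) on top of the
user/kernel delta. Now the two views differ by the constant
user_buffer_offset alone, and the result compares directly against
buffer->data. A worked model of the arithmetic, with made-up base addresses
and a 64-bit build assumed:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uintptr_t kern_base = 0xffff800000000000u;  /* hypothetical */
        uintptr_t user_base = 0x0000700000000000u;  /* hypothetical */
        intptr_t user_buffer_offset = (intptr_t)(user_base - kern_base);

        uintptr_t user_ptr = user_base + 0x340;     /* from user space */
        uintptr_t kern_ptr = user_ptr - user_buffer_offset;

        /* kern_ptr names the same byte as user_ptr; modular arithmetic
         * makes the subtraction exact even if the offset wrapped. */
        printf("kernel address: %#lx\n", (unsigned long)kern_ptr);
        return 0;
    }
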
@@ -330,6 +329,9 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
 		return ERR_PTR(-ENOSPC);
 	}
 
+	/* Pad 0-size buffers so they get assigned unique addresses */
+	size = max(size, sizeof(void *));
+
 	while (n) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 		BUG_ON(!buffer->free);
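
The padding is new: a zero-sized allocation would occupy no area bytes, so
its data pointer would equal its neighbour's and collide in trees that now
key on data. Rounding every request up to sizeof(void *) keeps the addresses
unique. A small illustration, with a toy MAX macro standing in for the
kernel's max():

    #include <stddef.h>
    #include <stdio.h>

    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
        unsigned char area[64];
        size_t req = 0;                         /* 0-size transaction */
        size_t size = MAX(req, sizeof(void *)); /* padded request */

        unsigned char *first = area;            /* first payload */
        unsigned char *second = area + size;    /* now a distinct key */

        printf("first=%p second=%p\n", (void *)first, (void *)second);
        return 0;
    }
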
@@ -389,14 +391,9 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
 
 	has_page_addr =
 		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
-	if (n == NULL) {
-		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
-			buffer_size = size; /* no room for other buffers */
-		else
-			buffer_size = size + sizeof(struct binder_buffer);
-	}
+	WARN_ON(n && buffer_size != size);
 	end_page_addr =
-		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
+		(void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
 	if (end_page_addr > has_page_addr)
 		end_page_addr = has_page_addr;
 	ret = binder_update_page_range(alloc, 1,
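
With headers out of the area there is nothing to reserve behind a payload,
so the old "room for the next struct binder_buffer" cases disappear: pages
are mapped only up to PAGE_ALIGN(data + size), capped at has_page_addr, the
last page boundary the chosen chunk fully owns. A sketch of that arithmetic
under an assumed 4 KiB page size:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE     4096u
    #define PAGE_MASK     (~(uintptr_t)(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x) (((uintptr_t)(x) + PAGE_SIZE - 1) & PAGE_MASK)

    int main(void)
    {
        uintptr_t data = 0x10000;        /* hypothetical buffer->data */
        uintptr_t buffer_size = 0x2800;  /* free chunk: 2.5 pages */
        uintptr_t size = 0x1200;         /* requested payload */

        uintptr_t has_page_addr = (data + buffer_size) & PAGE_MASK;
        uintptr_t end_page_addr = PAGE_ALIGN(data + size);

        if (end_page_addr > has_page_addr)
            end_page_addr = has_page_addr;

        printf("map up to %#lx (cap %#lx)\n",
               (unsigned long)end_page_addr,
               (unsigned long)has_page_addr);
        return 0;
    }
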
@@ -404,17 +401,25 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
 	if (ret)
 		return ERR_PTR(ret);
 
-	rb_erase(best_fit, &alloc->free_buffers);
-	buffer->free = 0;
-	buffer->free_in_progress = 0;
-	binder_insert_allocated_buffer_locked(alloc, buffer);
 	if (buffer_size != size) {
-		struct binder_buffer *new_buffer = (void *)buffer->data + size;
+		struct binder_buffer *new_buffer;
 
+		new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+		if (!new_buffer) {
+			pr_err("%s: %d failed to alloc new buffer struct\n",
+			       __func__, alloc->pid);
+			goto err_alloc_buf_struct_failed;
+		}
+		new_buffer->data = (u8 *)buffer->data + size;
 		list_add(&new_buffer->entry, &buffer->entry);
 		new_buffer->free = 1;
 		binder_insert_free_buffer(alloc, new_buffer);
 	}
+
+	rb_erase(best_fit, &alloc->free_buffers);
+	buffer->free = 0;
+	buffer->free_in_progress = 0;
+	binder_insert_allocated_buffer_locked(alloc, buffer);
 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 		     "%d: binder_alloc_buf size %zd got %pK\n",
 		      alloc->pid, size, buffer);
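
Splitting a free chunk now means allocating a fresh descriptor, which can
fail; that is also why the rb-tree bookkeeping for the chosen buffer moves
below the split, so the ENOMEM path can unwind without having touched the
trees. A toy split over a singly linked stand-in (calloc in place of
kzalloc, illustrative names):

    #include <stdio.h>
    #include <stdlib.h>

    struct desc {
        unsigned char *data;
        struct desc *next;
        int free;
    };

    /* Carve `size` bytes off the front of a free chunk of
     * `chunk_size` bytes; the remainder gets its own descriptor. */
    static int split_desc(struct desc *d, size_t chunk_size, size_t size)
    {
        struct desc *rest;

        if (chunk_size == size)
            return 0;                   /* exact fit, nothing to split */

        rest = calloc(1, sizeof(*rest));
        if (!rest)
            return -1;                  /* caller unwinds, as above */

        rest->data = d->data + size;    /* remainder starts after payload */
        rest->free = 1;
        rest->next = d->next;
        d->next = rest;
        return 0;
    }

    int main(void)
    {
        unsigned char area[256];
        struct desc head = { area, NULL, 1 };

        if (split_desc(&head, sizeof(area), 64) == 0 && head.next)
            printf("remainder at offset %td\n", head.next->data - area);
        return 0;
    }
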
@@ -429,6 +434,12 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
 			      alloc->pid, size, alloc->free_async_space);
 	}
 	return buffer;
+
+err_alloc_buf_struct_failed:
+	binder_update_page_range(alloc, 0,
+				 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+				 end_page_addr, NULL);
+	return ERR_PTR(-ENOMEM);
 }
 
 /**
@@ -463,56 +474,59 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 
 static void *buffer_start_page(struct binder_buffer *buffer)
 {
-	return (void *)((uintptr_t)buffer & PAGE_MASK);
+	return (void *)((uintptr_t)buffer->data & PAGE_MASK);
 }
 
-static void *buffer_end_page(struct binder_buffer *buffer)
+static void *prev_buffer_end_page(struct binder_buffer *buffer)
 {
-	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
+	return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
 }
 
 static void binder_delete_free_buffer(struct binder_alloc *alloc,
 				      struct binder_buffer *buffer)
 {
 	struct binder_buffer *prev, *next = NULL;
-	int free_page_end = 1;
-	int free_page_start = 1;
-
+	bool to_free = true;
 	BUG_ON(alloc->buffers.next == &buffer->entry);
 	prev = binder_buffer_prev(buffer);
 	BUG_ON(!prev->free);
-	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
-		free_page_start = 0;
-		if (buffer_end_page(prev) == buffer_end_page(buffer))
-			free_page_end = 0;
+	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
+		to_free = false;
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-			     "%d: merge free, buffer %pK share page with %pK\n",
-			      alloc->pid, buffer, prev);
+				   "%d: merge free, buffer %pK share page with %pK\n",
+				   alloc->pid, buffer->data, prev->data);
 	}
 
 	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
 		next = binder_buffer_next(buffer);
-		if (buffer_start_page(next) == buffer_end_page(buffer)) {
-			free_page_end = 0;
-			if (buffer_start_page(next) ==
-			    buffer_start_page(buffer))
-				free_page_start = 0;
+		if (buffer_start_page(next) == buffer_start_page(buffer)) {
+			to_free = false;
 			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-				     "%d: merge free, buffer %pK share page with %pK\n",
-				      alloc->pid, buffer, prev);
+					   "%d: merge free, buffer %pK share page with %pK\n",
+					   alloc->pid,
+					   buffer->data,
+					   next->data);
 		}
 	}
-	list_del(&buffer->entry);
-	if (free_page_start || free_page_end) {
+
+	if (PAGE_ALIGNED(buffer->data)) {
+		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+				   "%d: merge free, buffer start %pK is page aligned\n",
+				   alloc->pid, buffer->data);
+		to_free = false;
+	}
+
+	if (to_free) {
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-			     "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
-			      alloc->pid, buffer, free_page_start ? "" : " end",
-			      free_page_end ? "" : " start", prev, next);
-		binder_update_page_range(alloc, 0, free_page_start ?
-			buffer_start_page(buffer) : buffer_end_page(buffer),
-			(free_page_end ? buffer_end_page(buffer) :
-			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
+				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
+				   alloc->pid, buffer->data,
+				   prev->data, next->data);
+		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
+					 buffer_start_page(buffer) + PAGE_SIZE,
+					 NULL);
 	}
+	list_del(&buffer->entry);
+	kfree(buffer);
 }
 
 static void binder_free_buf_locked(struct binder_alloc *alloc,
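
The old free_page_start/free_page_end pair collapses into a single to_free
flag: a descriptor no longer occupies area bytes, so the only page a deleted
free buffer could hand back is the one holding its data start, and that page
must be kept whenever a neighbour's payload shares it or the start is
already page aligned. A standalone model of the test above (4 KiB pages,
illustrative names, not kernel code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u
    #define PAGE_MASK (~(uintptr_t)(PAGE_SIZE - 1))

    static bool keep_start_page(uintptr_t prev_data, uintptr_t data,
                                uintptr_t next_data, bool has_next)
    {
        uintptr_t start_page = data & PAGE_MASK;

        if (((prev_data - 1) & PAGE_MASK) == start_page)
            return true;    /* shared with the previous buffer */
        if (has_next && (next_data & PAGE_MASK) == start_page)
            return true;    /* shared with the next buffer */
        if ((data & (PAGE_SIZE - 1)) == 0)
            return true;    /* start is page aligned */
        return false;
    }

    int main(void)
    {
        /* start sits alone in the middle of its page: freeable */
        printf("%s\n", keep_start_page(0x1000, 0x2800, 0x4000, true)
               ? "keep" : "free");
        return 0;
    }
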
@@ -533,8 +547,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
 	BUG_ON(buffer->free);
 	BUG_ON(size > buffer_size);
 	BUG_ON(buffer->transaction != NULL);
-	BUG_ON((void *)buffer < alloc->buffer);
-	BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size);
+	BUG_ON(buffer->data < alloc->buffer);
+	BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);
 
 	if (buffer->async_transaction) {
 		alloc->free_async_space += size + sizeof(struct binder_buffer);
@@ -646,13 +660,14 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 	}
 	alloc->buffer_size = vma->vm_end - vma->vm_start;
 
-	if (binder_update_page_range(alloc, 1, alloc->buffer,
-				     alloc->buffer + PAGE_SIZE, vma)) {
+	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+	if (!buffer) {
 		ret = -ENOMEM;
-		failure_string = "alloc small buf";
-		goto err_alloc_small_buf_failed;
+		failure_string = "alloc buffer struct";
+		goto err_alloc_buf_struct_failed;
 	}
-	buffer = alloc->buffer;
+
+	buffer->data = alloc->buffer;
 	INIT_LIST_HEAD(&alloc->buffers);
 	list_add(&buffer->entry, &alloc->buffers);
 	buffer->free = 1;
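
mmap no longer has to populate anything up front: previously the first
struct binder_buffer header had to live at the start of the area, so one
page was mapped eagerly; now the initial descriptor is a kzalloc'ed object
whose data merely points at the still-unmapped area. A toy version of the
new setup path (made-up types, calloc for kzalloc):

    #include <stdio.h>
    #include <stdlib.h>

    struct desc {
        unsigned char *data;
        struct desc *next;
        int free;
    };

    struct toy_alloc {
        unsigned char *area;    /* stand-in for alloc->buffer */
        size_t area_size;
        struct desc *buffers;   /* stand-in for alloc->buffers */
    };

    static int toy_mmap(struct toy_alloc *a, unsigned char *area,
                        size_t size)
    {
        struct desc *buffer = calloc(1, sizeof(*buffer));

        if (!buffer)
            return -1;          /* "alloc buffer struct" failure */

        a->area = area;
        a->area_size = size;
        buffer->data = area;    /* one free chunk spans the area */
        buffer->free = 1;
        a->buffers = buffer;
        return 0;               /* no page touched yet */
    }

    int main(void)
    {
        static unsigned char area[1 << 16];
        struct toy_alloc a;

        if (toy_mmap(&a, area, sizeof(area)) == 0)
            printf("free chunk at %p, %zu bytes\n",
                   (void *)a.buffers->data, a.area_size);
        return 0;
    }
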
@@ -664,7 +679,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 
 	return 0;
 
-err_alloc_small_buf_failed:
+err_alloc_buf_struct_failed:
 	kfree(alloc->pages);
 	alloc->pages = NULL;
 err_alloc_pages_failed:
@@ -684,14 +699,13 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 {
 	struct rb_node *n;
 	int buffers, page_count;
+	struct binder_buffer *buffer;
 
 	BUG_ON(alloc->vma);
 
 	buffers = 0;
 	mutex_lock(&alloc->mutex);
 	while ((n = rb_first(&alloc->allocated_buffers))) {
-		struct binder_buffer *buffer;
-
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 
 		/* Transaction should already have been freed */
@@ -701,6 +715,16 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 		buffers++;
 	}
 
+	while (!list_empty(&alloc->buffers)) {
+		buffer = list_first_entry(&alloc->buffers,
+					  struct binder_buffer, entry);
+		WARN_ON(!buffer->free);
+
+		list_del(&buffer->entry);
+		WARN_ON_ONCE(!list_empty(&alloc->buffers));
+		kfree(buffer);
+	}
+
 	page_count = 0;
 	if (alloc->pages) {
 		int i;
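
Teardown picks up the matching duty: descriptors are separate heap objects
now, so release must walk alloc->buffers and kfree whatever remains; once
every allocated buffer has been freed above, exactly one free descriptor
should be left, which is what the WARN_ON_ONCE asserts. A toy version of the
loop (singly linked stand-in, illustrative names):

    #include <assert.h>
    #include <stdlib.h>

    struct desc {
        struct desc *next;
        int free;
    };

    /* Pop and free descriptors until the list is empty. */
    static void release_all(struct desc **head)
    {
        while (*head) {
            struct desc *d = *head;

            assert(d->free);    /* only free chunks may remain */
            *head = d->next;
            free(d);
        }
    }

    int main(void)
    {
        struct desc *head = calloc(1, sizeof(*head));

        if (head) {
            head->free = 1;
            release_all(&head);
        }
        return 0;
    }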