@@ -311,17 +311,36 @@ struct binder_error {
  * @refs:                 list of references on this node
  * @internal_strong_refs: used to take strong references when
  *                        initiating a transaction
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
  * @local_weak_refs:      weak user refs from local process
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
  * @local_strong_refs:    strong user refs from local process
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
  * @tmp_refs:             temporary kernel refs
+ *                        (protected by @proc->inner_lock while @proc
+ *                        is valid, and by binder_dead_nodes_lock
+ *                        if @proc is NULL. During inc/dec and node release
+ *                        it is also protected by @lock to provide safety
+ *                        as the node dies and @proc becomes NULL)
  * @ptr:                  userspace pointer for node
  *                        (invariant, no lock needed)
  * @cookie:               userspace cookie for node
  *                        (invariant, no lock needed)
  * @has_strong_ref:       userspace notified of strong ref
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
  * @pending_strong_ref:   userspace has acked notification of strong ref
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
  * @has_weak_ref:         userspace notified of weak ref
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
  * @pending_weak_ref:     userspace has acked notification of weak ref
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
  * @has_async_transaction: async transaction to node in progress
  * @accept_fds:           file descriptor operations supported for node
  *                        (invariant after initialized)
@@ -347,13 +366,24 @@ struct binder_node {
 	int tmp_refs;
 	binder_uintptr_t ptr;
 	binder_uintptr_t cookie;
-	unsigned has_strong_ref:1;
-	unsigned pending_strong_ref:1;
-	unsigned has_weak_ref:1;
-	unsigned pending_weak_ref:1;
-	unsigned has_async_transaction:1;
-	unsigned accept_fds:1;
-	unsigned min_priority:8;
+	struct {
+		/*
+		 * bitfield elements protected by
+		 * proc inner_lock
+		 */
+		u8 has_strong_ref:1;
+		u8 pending_strong_ref:1;
+		u8 has_weak_ref:1;
+		u8 pending_weak_ref:1;
+	};
+	struct {
+		/*
+		 * invariant after initialization
+		 */
+		u8 accept_fds:1;
+		u8 min_priority;
+	};
+	bool has_async_transaction;
 	struct list_head async_todo;
 };
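The doc-comment hunk above pins down which lock guards each binder_node field: the ref counts and the userspace-notification bits are covered by @proc->inner_lock while the owning process exists. The following is a minimal userspace sketch of that convention; it is not part of the patch, and the names (struct proc, node_set_has_strong_ref(), inner_lock) are illustrative stand-ins for the driver's binder_proc and binder_inner_proc_lock().

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for binder_proc: the owner that provides the lock. */
struct proc {
	pthread_mutex_t inner_lock;	/* plays the role of proc->inner_lock */
};

/* Stand-in for binder_node: mutable flags live behind the owner's lock. */
struct node {
	struct proc *proc;		/* NULL once the owner has gone away */
	/* bitfield elements protected by proc->inner_lock */
	uint8_t has_strong_ref:1;
	uint8_t pending_strong_ref:1;
};

/* Update a flag under the owner's lock, mirroring the documented rule. */
static void node_set_has_strong_ref(struct node *n, int val)
{
	pthread_mutex_lock(&n->proc->inner_lock);
	n->has_strong_ref = val ? 1 : 0;
	pthread_mutex_unlock(&n->proc->inner_lock);
}

int main(void)
{
	struct proc p = { .inner_lock = PTHREAD_MUTEX_INITIALIZER };
	struct node n = { .proc = &p };

	node_set_has_strong_ref(&n, 1);
	printf("has_strong_ref = %u\n", n.has_strong_ref);
	return 0;
}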
@@ -813,9 +843,18 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
 	return node;
 }
 
-static int binder_inc_node(struct binder_node *node, int strong, int internal,
-			   struct list_head *target_list)
+static void binder_free_node(struct binder_node *node)
 {
+	kfree(node);
+	binder_stats_deleted(BINDER_STAT_NODE);
+}
+
+static int binder_inc_node_ilocked(struct binder_node *node, int strong,
+				   int internal,
+				   struct list_head *target_list)
+{
+	if (node->proc)
+		BUG_ON(!spin_is_locked(&node->proc->inner_lock));
 	if (strong) {
 		if (internal) {
 			if (target_list == NULL &&
@@ -849,23 +888,43 @@ static int binder_inc_node(struct binder_node *node, int strong, int internal,
 	return 0;
 }
 
-static int binder_dec_node(struct binder_node *node, int strong, int internal)
+static int binder_inc_node(struct binder_node *node, int strong, int internal,
+			   struct list_head *target_list)
+{
+	int ret;
+
+	if (node->proc)
+		binder_inner_proc_lock(node->proc);
+	ret = binder_inc_node_ilocked(node, strong, internal, target_list);
+	if (node->proc)
+		binder_inner_proc_unlock(node->proc);
+
+	return ret;
+}
+
+static bool binder_dec_node_ilocked(struct binder_node *node,
+				    int strong, int internal)
 {
+	struct binder_proc *proc = node->proc;
+
+	if (proc)
+		BUG_ON(!spin_is_locked(&proc->inner_lock));
 	if (strong) {
 		if (internal)
 			node->internal_strong_refs--;
 		else
 			node->local_strong_refs--;
 		if (node->local_strong_refs || node->internal_strong_refs)
-			return 0;
+			return false;
 	} else {
 		if (!internal)
 			node->local_weak_refs--;
 		if (node->local_weak_refs || node->tmp_refs ||
 				!hlist_empty(&node->refs))
-			return 0;
+			return false;
 	}
-	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
+
+	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
 		if (list_empty(&node->work.entry)) {
 			list_add_tail(&node->work.entry, &node->proc->todo);
 			wake_up_interruptible(&node->proc->wait);
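The two hunks above split each refcount operation into an *_ilocked() body that expects the caller to already hold the node's proc->inner_lock (asserted with BUG_ON(!spin_is_locked(...))), plus a thin wrapper that takes and releases the lock only when the node still has a proc. Below is a hedged userspace sketch of the same shape, with a pthread mutex standing in for the spinlock; the function and field names are illustrative, not the driver's.

#include <pthread.h>
#include <stdio.h>

struct proc {
	pthread_mutex_t inner_lock;
};

struct node {
	struct proc *proc;	/* NULL once the node is "dead" */
	int strong_refs;
};

/*
 * "_ilocked" flavor: the caller must already hold node->proc->inner_lock
 * whenever node->proc is non-NULL (the kernel patch enforces this with
 * BUG_ON(!spin_is_locked(...))).
 */
static int node_inc_ilocked(struct node *n)
{
	n->strong_refs++;
	return 0;
}

/* Wrapper: take the owner's lock only if there still is an owner. */
static int node_inc(struct node *n)
{
	int ret;

	if (n->proc)
		pthread_mutex_lock(&n->proc->inner_lock);
	ret = node_inc_ilocked(n);
	if (n->proc)
		pthread_mutex_unlock(&n->proc->inner_lock);
	return ret;
}

int main(void)
{
	struct proc p = { .inner_lock = PTHREAD_MUTEX_INITIALIZER };
	struct node n = { .proc = &p, .strong_refs = 0 };

	node_inc(&n);
	printf("strong_refs = %d\n", n.strong_refs);
	return 0;
}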
@@ -874,25 +933,55 @@ static int binder_dec_node(struct binder_node *node, int strong, int internal)
 		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
 		    !node->local_weak_refs && !node->tmp_refs) {
 			list_del_init(&node->work.entry);
-			if (node->proc) {
+			if (proc) {
 				rb_erase(&node->rb_node, &node->proc->nodes);
 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
 					     "refless node %d deleted\n",
 					     node->debug_id);
 			} else {
 				spin_lock(&binder_dead_nodes_lock);
+				/*
+				 * tmp_refs could have changed so
+				 * check it again
+				 */
+				if (node->tmp_refs) {
+					spin_unlock(&binder_dead_nodes_lock);
+					return false;
+				}
 				hlist_del(&node->dead_node);
 				spin_unlock(&binder_dead_nodes_lock);
 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
 					     "dead node %d deleted\n",
 					     node->debug_id);
 			}
-			kfree(node);
-			binder_stats_deleted(BINDER_STAT_NODE);
+			return true;
 		}
 	}
+	return false;
+}
 
-	return 0;
+static void binder_dec_node(struct binder_node *node, int strong, int internal)
+{
+	bool free_node;
+
+	if (node->proc)
+		binder_inner_proc_lock(node->proc);
+	free_node = binder_dec_node_ilocked(node, strong, internal);
+	if (node->proc)
+		binder_inner_proc_unlock(node->proc);
+
+	if (free_node)
+		binder_free_node(node);
+}
+
+static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
+{
+	/*
+	 * No call to binder_inc_node() is needed since we
+	 * don't need to inform userspace of any changes to
+	 * tmp_refs
+	 */
+	node->tmp_refs++;
+}
 
 /**
@@ -900,16 +989,25 @@ static int binder_dec_node(struct binder_node *node, int strong, int internal)
  * @node: node to reference
  *
  * Take reference on node to prevent the node from being freed
- * while referenced only by a local variable
+ * while referenced only by a local variable. The inner lock is
+ * needed to serialize with the node work on the queue (which
+ * isn't needed after the node is dead). If the node is dead
+ * (node->proc is NULL), use binder_dead_nodes_lock to protect
+ * node->tmp_refs against dead-node-only cases where the node
+ * lock cannot be acquired (eg traversing the dead node list to
+ * print nodes)
  */
 static void binder_inc_node_tmpref(struct binder_node *node)
 {
-	/*
-	 * No call to binder_inc_node() is needed since we
-	 * don't need to inform userspace of any changes to
-	 * tmp_refs
-	 */
-	node->tmp_refs++;
+	if (node->proc)
+		binder_inner_proc_lock(node->proc);
+	else
+		spin_lock(&binder_dead_nodes_lock);
+	binder_inc_node_tmpref_ilocked(node);
+	if (node->proc)
+		binder_inner_proc_unlock(node->proc);
+	else
+		spin_unlock(&binder_dead_nodes_lock);
 }
 
 /**
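binder_inc_node_tmpref() above picks its lock based on whether the node still belongs to a process: proc->inner_lock while the proc exists, the global binder_dead_nodes_lock once the node is dead. Below is a small userspace sketch of that "pick the guard by ownership" pattern; the dead_nodes_lock mutex and node_take_tmpref() name are illustrative only, and the real driver has additional protection (the node lock mentioned in the doc comment) for the window in which proc goes NULL.

#include <pthread.h>
#include <stdio.h>

/* Global lock guarding nodes whose owning process is gone. */
static pthread_mutex_t dead_nodes_lock = PTHREAD_MUTEX_INITIALIZER;

struct proc {
	pthread_mutex_t inner_lock;
};

struct node {
	struct proc *proc;	/* NULL => node is on the "dead" list */
	int tmp_refs;		/* guarded by inner_lock or dead_nodes_lock */
};

/* Take a temporary ref under whichever lock currently guards tmp_refs. */
static void node_take_tmpref(struct node *n)
{
	if (n->proc)
		pthread_mutex_lock(&n->proc->inner_lock);
	else
		pthread_mutex_lock(&dead_nodes_lock);

	n->tmp_refs++;

	if (n->proc)
		pthread_mutex_unlock(&n->proc->inner_lock);
	else
		pthread_mutex_unlock(&dead_nodes_lock);
}

int main(void)
{
	struct node dead = { .proc = NULL };	/* a node with no owner */

	node_take_tmpref(&dead);
	printf("tmp_refs = %d\n", dead.tmp_refs);
	return 0;
}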
@@ -920,15 +1018,27 @@ static void binder_inc_node_tmpref(struct binder_node *node)
  */
 static void binder_dec_node_tmpref(struct binder_node *node)
 {
+	bool free_node;
+
+	if (node->proc)
+		binder_inner_proc_lock(node->proc);
+	else
+		spin_lock(&binder_dead_nodes_lock);
 	node->tmp_refs--;
 	BUG_ON(node->tmp_refs < 0);
+	if (!node->proc)
+		spin_unlock(&binder_dead_nodes_lock);
 	/*
 	 * Call binder_dec_node() to check if all refcounts are 0
 	 * and cleanup is needed. Calling with strong=0 and internal=1
 	 * causes no actual reference to be released in binder_dec_node().
 	 * If that changes, a change is needed here too.
 	 */
-	binder_dec_node(node, 0, 1);
+	free_node = binder_dec_node_ilocked(node, 0, 1);
+	if (node->proc)
+		binder_inner_proc_unlock(node->proc);
+	if (free_node)
+		binder_free_node(node);
 }
 
 static void binder_put_node(struct binder_node *node)
@@ -1041,6 +1151,9 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
 
 static void binder_cleanup_ref(struct binder_ref *ref)
 {
+	bool delete_node = false;
+	struct binder_proc *node_proc = ref->node->proc;
+
 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
 		     "%d delete ref %d desc %d for node %d\n",
 		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
@@ -1049,11 +1162,26 @@ static void binder_cleanup_ref(struct binder_ref *ref)
 	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
 	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
 
+	if (node_proc)
+		binder_inner_proc_lock(node_proc);
 	if (ref->data.strong)
-		binder_dec_node(ref->node, 1, 1);
+		binder_dec_node_ilocked(ref->node, 1, 1);
 
 	hlist_del(&ref->node_entry);
-	binder_dec_node(ref->node, 0, 1);
+	delete_node = binder_dec_node_ilocked(ref->node, 0, 1);
+	if (node_proc)
+		binder_inner_proc_unlock(node_proc);
+	/*
+	 * Clear ref->node unless we want the caller to free the node
+	 */
+	if (!delete_node) {
+		/*
+		 * The caller uses ref->node to determine
+		 * whether the node needs to be freed. Clear
+		 * it since the node is still alive.
+		 */
+		ref->node = NULL;
+	}
 
 	if (ref->death) {
 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
@@ -1122,13 +1250,8 @@ static bool binder_dec_ref(struct binder_ref *ref, int strong)
 			return false;
 		}
 		ref->data.strong--;
-		if (ref->data.strong == 0) {
-			int ret;
-
-			ret = binder_dec_node(ref->node, strong, 1);
-			if (ret)
-				return false;
-		}
+		if (ref->data.strong == 0)
+			binder_dec_node(ref->node, strong, 1);
 	} else {
 		if (ref->data.weak == 0) {
 			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
@@ -1193,10 +1316,13 @@ err_no_ref:
 * binder_free_ref() - free the binder_ref
 * @ref: ref to free
 *
- * Free the binder_ref and the binder_ref_death indicated by ref->death.
+ * Free the binder_ref. Free the binder_node indicated by ref->node
+ * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
 static void binder_free_ref(struct binder_ref *ref)
 {
+	if (ref->node)
+		binder_free_node(ref->node);
 	kfree(ref->death);
 	kfree(ref);
 }
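binder_cleanup_ref() above now drops its node references under the node's proc lock and only records, via ref->node, whether the node should be freed; binder_free_ref() later does the actual kfree() with no locks held. Below is a hedged userspace sketch of that "decide under the lock, free after dropping it" split; the types, names, and single refcount are illustrative simplifications.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	pthread_mutex_t lock;
	int refs;
};

struct ref {
	struct node *node;	/* left non-NULL only if the node must be freed */
};

/* Drop the ref under the lock; remember whether the node is now unused. */
static void cleanup_ref(struct ref *r)
{
	int delete_node;

	pthread_mutex_lock(&r->node->lock);
	r->node->refs--;
	delete_node = (r->node->refs == 0);
	pthread_mutex_unlock(&r->node->lock);

	if (!delete_node)
		r->node = NULL;	/* node stays alive; nothing for free_ref() to do */
}

/* Free with no locks held; also free the node if cleanup flagged it. */
static void free_ref(struct ref *r)
{
	if (r->node)
		free(r->node);
	free(r);
}

int main(void)
{
	struct node *n = calloc(1, sizeof(*n));
	struct ref *r = calloc(1, sizeof(*r));

	pthread_mutex_init(&n->lock, NULL);
	n->refs = 1;
	r->node = n;

	cleanup_ref(r);		/* last ref: flags the node for freeing */
	free_ref(r);		/* frees both the ref and the node */
	return 0;
}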
@@ -2687,11 +2813,13 @@ static int binder_thread_write(struct binder_proc *proc,
 				binder_put_node(node);
 				break;
 			}
+			binder_inner_proc_lock(proc);
 			if (cmd == BC_ACQUIRE_DONE) {
 				if (node->pending_strong_ref == 0) {
 					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
 						proc->pid, thread->pid,
 						node->debug_id);
+					binder_inner_proc_unlock(proc);
 					binder_put_node(node);
 					break;
 				}
@@ -2701,11 +2829,13 @@ static int binder_thread_write(struct binder_proc *proc,
 					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
 						proc->pid, thread->pid,
 						node->debug_id);
+					binder_inner_proc_unlock(proc);
 					binder_put_node(node);
 					break;
 				}
 				node->pending_weak_ref = 0;
 			}
+			binder_inner_proc_unlock(proc);
 			binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
 			binder_debug(BINDER_DEBUG_USER_REFS,
 				     "%d:%d %s node %d ls %d lw %d tr %d\n",
@@ -3091,6 +3221,7 @@ retry:
 		struct binder_transaction *t = NULL;
 		struct binder_thread *t_from;
 
+		binder_inner_proc_lock(proc);
 		if (!list_empty(&thread->todo)) {
 			w = list_first_entry(&thread->todo, struct binder_work,
 					     entry);
@@ -3104,11 +3235,15 @@ retry:
 			break;
 		}
 
-		if (end - ptr < sizeof(tr) + 4)
+		if (end - ptr < sizeof(tr) + 4) {
+			binder_inner_proc_unlock(proc);
 			break;
+		}
+		list_del_init(&w->entry);
 
 		switch (w->type) {
 		case BINDER_WORK_TRANSACTION: {
+			binder_inner_proc_unlock(proc);
 			t = container_of(w, struct binder_transaction, work);
 		} break;
 		case BINDER_WORK_RETURN_ERROR: {
@@ -3116,15 +3251,16 @@ retry:
 					w, struct binder_error, work);
 
 			WARN_ON(e->cmd == BR_OK);
+			binder_inner_proc_unlock(proc);
 			if (put_user(e->cmd, (uint32_t __user *)ptr))
 				return -EFAULT;
 			e->cmd = BR_OK;
 			ptr += sizeof(uint32_t);
 
 			binder_stat_br(proc, thread, cmd);
-			list_del(&w->entry);
 		} break;
 		case BINDER_WORK_TRANSACTION_COMPLETE: {
+			binder_inner_proc_unlock(proc);
 			cmd = BR_TRANSACTION_COMPLETE;
 			if (put_user(cmd, (uint32_t __user *)ptr))
 				return -EFAULT;
@@ -3134,8 +3270,6 @@ retry:
 			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
 				     "%d:%d BR_TRANSACTION_COMPLETE\n",
 				     proc->pid, thread->pid);
-
-			list_del(&w->entry);
 			kfree(w);
 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
 		} break;
@@ -3172,8 +3306,6 @@ retry:
 				node->has_strong_ref = 0;
 			if (!weak && has_weak_ref)
 				node->has_weak_ref = 0;
-			list_del(&w->entry);
-
 			if (!weak && !strong) {
 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
 					     "%d:%d node %d u%016llx c%016llx deleted\n",
@@ -3182,9 +3314,11 @@ retry:
 					     (u64)node_ptr,
 					     (u64)node_cookie);
 				rb_erase(&node->rb_node, &proc->nodes);
-				kfree(node);
-				binder_stats_deleted(BINDER_STAT_NODE);
-			}
+				binder_inner_proc_unlock(proc);
+				binder_free_node(node);
+			} else
+				binder_inner_proc_unlock(proc);
+
 			if (weak && !has_weak_ref)
 				ret = binder_put_node_cmd(
 						proc, thread, &ptr, node_ptr,
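The binder_thread_read() hunks above take proc->inner_lock before looking at the todo list, remove the work item with list_del_init() while still holding the lock, and drop the lock before copying anything to userspace (which may sleep). Below is a minimal userspace sketch of that "dequeue under the lock, handle the item unlocked" loop; the queue type and names are illustrative, not the driver's list_head machinery.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct work {
	struct work *next;
	int payload;
};

struct queue {
	pthread_mutex_t lock;	/* plays the role of proc->inner_lock */
	struct work *head;
};

/* Pop one item under the lock; return it with the lock already released. */
static struct work *dequeue(struct queue *q)
{
	struct work *w;

	pthread_mutex_lock(&q->lock);
	w = q->head;
	if (w)
		q->head = w->next;
	pthread_mutex_unlock(&q->lock);
	return w;
}

/* Handle the item with no locks held (the driver copies to userspace here). */
static void handle(struct work *w)
{
	printf("handled work %d\n", w->payload);
	free(w);
}

int main(void)
{
	struct queue q = { .lock = PTHREAD_MUTEX_INITIALIZER, .head = NULL };
	struct work *w = malloc(sizeof(*w));
	struct work *got;

	w->next = NULL;
	w->payload = 42;
	q.head = w;

	while ((got = dequeue(&q)) != NULL)
		handle(got);
	return 0;
}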
@@ -3226,6 +3360,13 @@ retry:
 				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
 			else
 				cmd = BR_DEAD_BINDER;
+			/*
+			 * TODO: there is a race condition between
+			 * death notification requests and delivery
+			 * of the notifications. This will be handled
+			 * in a later patch.
+			 */
+			binder_inner_proc_unlock(proc);
 			if (put_user(cmd, (uint32_t __user *)ptr))
 				return -EFAULT;
 			ptr += sizeof(uint32_t);
@@ -3243,11 +3384,14 @@ retry:
 					  (u64)death->cookie);
 
 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
-				list_del(&w->entry);
 				kfree(death);
 				binder_stats_deleted(BINDER_STAT_DEATH);
-			} else
-				list_move(&w->entry, &proc->delivered_death);
+			} else {
+				binder_inner_proc_lock(proc);
+				list_add_tail(&w->entry,
+					      &proc->delivered_death);
+				binder_inner_proc_unlock(proc);
+			}
 			if (cmd == BR_DEAD_BINDER)
 				goto done; /* DEAD_BINDER notifications can cause transactions */
 		} break;
@@ -3325,7 +3469,6 @@ retry:
 
 		if (t_from)
 			binder_thread_dec_tmpref(t_from);
-		list_del(&t->work.entry);
 		t->buffer->allow_user_free = 1;
 		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
 			t->to_parent = thread->transaction_stack;
@@ -3924,16 +4067,19 @@ static int binder_node_release(struct binder_node *node, int refs)
 {
 	struct binder_ref *ref;
 	int death = 0;
+	struct binder_proc *proc = node->proc;
 
-	list_del_init(&node->work.entry);
 	binder_release_work(&node->async_todo);
+
+	binder_inner_proc_lock(proc);
+	list_del_init(&node->work.entry);
 	/*
 	 * The caller must have taken a temporary ref on the node,
 	 */
 	BUG_ON(!node->tmp_refs);
 	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
-		kfree(node);
-		binder_stats_deleted(BINDER_STAT_NODE);
+		binder_inner_proc_unlock(proc);
+		binder_free_node(node);
 
 		return refs;
 	}
@@ -3941,6 +4087,7 @@ static int binder_node_release(struct binder_node *node, int refs)
 	node->proc = NULL;
 	node->local_strong_refs = 0;
 	node->local_weak_refs = 0;
+	binder_inner_proc_unlock(proc);
 
 	spin_lock(&binder_dead_nodes_lock);
 	hlist_add_head(&node->dead_node, &binder_dead_nodes);
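binder_node_release() above detaches the node from its process under proc->inner_lock (clearing node->proc and the local ref counts), drops that lock, and only then adds the node to the global dead-nodes list under binder_dead_nodes_lock. Below is a hedged userspace sketch of that hand-off from a per-owner lock domain to a global one; the list representation and names are illustrative only.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dead_nodes_lock = PTHREAD_MUTEX_INITIALIZER;

struct proc {
	pthread_mutex_t inner_lock;
};

struct node {
	struct proc *proc;
	struct node *dead_next;	/* link on the global dead-nodes list */
};

static struct node *dead_nodes;	/* guarded by dead_nodes_lock */

/* Detach under the owner's lock, then publish on the global dead list. */
static void node_release(struct node *n)
{
	struct proc *proc = n->proc;

	pthread_mutex_lock(&proc->inner_lock);
	n->proc = NULL;			/* from now on, dead_nodes_lock guards it */
	pthread_mutex_unlock(&proc->inner_lock);

	pthread_mutex_lock(&dead_nodes_lock);
	n->dead_next = dead_nodes;
	dead_nodes = n;
	pthread_mutex_unlock(&dead_nodes_lock);
}

int main(void)
{
	struct proc p = { .inner_lock = PTHREAD_MUTEX_INITIALIZER };
	struct node n = { .proc = &p };

	node_release(&n);
	printf("node is dead: %s\n", dead_nodes == &n ? "yes" : "no");
	return 0;
}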