@@ -28,10 +28,10 @@
  *    binder_node_lock() and binder_node_unlock() are
  *    used to acq/rel
  * 3) proc->inner_lock : protects the thread and node lists
- *    (proc->threads, proc->nodes) and all todo lists associated
- *    with the binder_proc (proc->todo, thread->todo,
- *    proc->delivered_death and node->async_todo), as well as
- *    thread->transaction_stack
+ *    (proc->threads, proc->waiting_threads, proc->nodes)
+ *    and all todo lists associated with the binder_proc
+ *    (proc->todo, thread->todo, proc->delivered_death and
+ *    node->async_todo), as well as thread->transaction_stack
  *    binder_inner_proc_lock() and binder_inner_proc_unlock()
  *    are used to acq/rel
  *
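The comment above adds proc->waiting_threads to the set of lists guarded by proc->inner_lock. Every access to these lists follows the same bracketing pattern; a minimal caller-side sketch (this exact sequence appears in the binder_ioctl_write_read() hunk further down, using helpers introduced below):

	binder_inner_proc_lock(proc);
	if (!binder_worklist_empty_ilocked(&proc->todo))
		binder_wakeup_proc_ilocked(proc, false);
	binder_inner_proc_unlock(proc);

By convention the _ilocked suffix marks helpers whose caller must already hold proc->inner_lock.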
@@ -475,6 +475,8 @@ enum binder_deferred_state {
  *                        (protected by @outer_lock)
  * @refs_by_node:         rbtree of refs ordered by ref->node
  *                        (protected by @outer_lock)
+ * @waiting_threads:      threads currently waiting for proc work
+ *                        (protected by @inner_lock)
  * @pid                   PID of group_leader of process
  *                        (invariant after initialized)
  * @tsk                   task_struct for group_leader of process
@@ -504,8 +506,6 @@ enum binder_deferred_state {
  *                        (protected by @inner_lock)
  * @requested_threads_started: number binder threads started
  *                        (protected by @inner_lock)
- * @ready_threads:        number of threads waiting for proc work
- *                        (protected by @inner_lock)
  * @tmp_ref:              temporary reference to indicate proc is in use
  *                        (protected by @inner_lock)
  * @default_priority:     default scheduler priority
@@ -526,6 +526,7 @@ struct binder_proc {
 	struct rb_root nodes;
 	struct rb_root refs_by_desc;
 	struct rb_root refs_by_node;
+	struct list_head waiting_threads;
 	int pid;
 	struct task_struct *tsk;
 	struct files_struct *files;
@@ -540,7 +541,6 @@ struct binder_proc {
 	int max_threads;
 	int requested_threads;
 	int requested_threads_started;
-	int ready_threads;
 	int tmp_ref;
 	long default_priority;
 	struct dentry *debugfs_entry;
@@ -556,6 +556,7 @@ enum {
 	BINDER_LOOPER_STATE_EXITED      = 0x04,
 	BINDER_LOOPER_STATE_INVALID     = 0x08,
 	BINDER_LOOPER_STATE_WAITING     = 0x10,
+	BINDER_LOOPER_STATE_POLL        = 0x20,
 };
 
 /**
@@ -564,6 +565,8 @@ enum {
  *                        (invariant after initialization)
  * @rb_node:              element for proc->threads rbtree
  *                        (protected by @proc->inner_lock)
+ * @waiting_thread_node:  element for @proc->waiting_threads list
+ *                        (protected by @proc->inner_lock)
  * @pid:                  PID for this thread
  *                        (invariant after initialization)
  * @looper:               bitmap of looping state
@@ -593,6 +596,7 @@ enum {
 struct binder_thread {
 	struct binder_proc *proc;
 	struct rb_node rb_node;
+	struct list_head waiting_thread_node;
 	int pid;
 	int looper;              /* only modified by this thread */
 	bool looper_need_return; /* can be written by other thread */
@@ -920,6 +924,86 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
 	return retval;
 }
 
+static bool binder_has_work_ilocked(struct binder_thread *thread,
+				    bool do_proc_work)
+{
+	return !binder_worklist_empty_ilocked(&thread->todo) ||
+		thread->looper_need_return ||
+		(do_proc_work &&
+		 !binder_worklist_empty_ilocked(&thread->proc->todo));
+}
+
+static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
+{
+	bool has_work;
+
+	binder_inner_proc_lock(thread->proc);
+	has_work = binder_has_work_ilocked(thread, do_proc_work);
+	binder_inner_proc_unlock(thread->proc);
+
+	return has_work;
+}
+
+static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
+{
+	return !thread->transaction_stack &&
+		binder_worklist_empty_ilocked(&thread->todo) &&
+		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
+				   BINDER_LOOPER_STATE_REGISTERED));
+}
+
+static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
+					       bool sync)
+{
+	struct rb_node *n;
+	struct binder_thread *thread;
+
+	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
+		thread = rb_entry(n, struct binder_thread, rb_node);
+		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
+		    binder_available_for_proc_work_ilocked(thread)) {
+			if (sync)
+				wake_up_interruptible_sync(&thread->wait);
+			else
+				wake_up_interruptible(&thread->wait);
+		}
+	}
+}
+
+static void binder_wakeup_proc_ilocked(struct binder_proc *proc, bool sync)
+{
+	struct binder_thread *thread;
+
+	BUG_ON(!spin_is_locked(&proc->inner_lock));
+	thread = list_first_entry_or_null(&proc->waiting_threads,
+					  struct binder_thread,
+					  waiting_thread_node);
+
+	if (thread) {
+		list_del_init(&thread->waiting_thread_node);
+		if (sync)
+			wake_up_interruptible_sync(&thread->wait);
+		else
+			wake_up_interruptible(&thread->wait);
+		return;
+	}
+
+	/* Didn't find a thread waiting for proc work; this can happen
+	 * in two scenarios:
+	 * 1. All threads are busy handling transactions
+	 *    In that case, one of those threads should call back into
+	 *    the kernel driver soon and pick up this work.
+	 * 2. Threads are using the (e)poll interface, in which case
+	 *    they may be blocked on the waitqueue without having been
+	 *    added to waiting_threads. For this case, we just iterate
+	 *    over all threads not handling transaction work, and
+	 *    wake them all up. We wake all because we don't know whether
+	 *    a thread that called into (e)poll is handling non-binder
+	 *    work currently.
+	 */
+	binder_wakeup_poll_threads_ilocked(proc, sync);
+}
+
 static void binder_set_nice(long nice)
 {
 	long min_nice;
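The wakeup path added above is two-tier: binder_wakeup_proc_ilocked() pops one idle waiter off proc->waiting_threads in O(1), and only when that list is empty falls back to binder_wakeup_poll_threads_ilocked(), which walks the whole thread rbtree looking for BINDER_LOOPER_STATE_POLL waiters, for the reasons given in the block comment. The producer side is always "enqueue, then wake, under the inner lock"; a sketch assembled from the call sites later in this patch (work stands for whatever struct binder_work * is being delivered):

	binder_inner_proc_lock(proc);
	binder_enqueue_work_ilocked(work, &proc->todo);
	binder_wakeup_proc_ilocked(proc, false);	/* sync == false */
	binder_inner_proc_unlock(proc);

Passing sync == true selects wake_up_interruptible_sync(), a scheduler hint that the waker is about to block itself.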
@@ -1138,7 +1222,7 @@ static bool binder_dec_node_nilocked(struct binder_node *node,
 	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
 		if (list_empty(&node->work.entry)) {
 			binder_enqueue_work_ilocked(&node->work, &proc->todo);
-			wake_up_interruptible(&node->proc->wait);
+			binder_wakeup_proc_ilocked(proc, false);
 		}
 	} else {
 		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
@@ -2399,7 +2483,6 @@ static void binder_transaction(struct binder_proc *proc,
 	struct binder_thread *target_thread = NULL;
 	struct binder_node *target_node = NULL;
 	struct list_head *target_list;
-	wait_queue_head_t *target_wait;
 	struct binder_transaction *in_reply_to = NULL;
 	struct binder_transaction_log_entry *e;
 	uint32_t return_error = 0;
@@ -2409,6 +2492,7 @@ static void binder_transaction(struct binder_proc *proc,
 	binder_size_t last_fixup_min_off = 0;
 	struct binder_context *context = proc->context;
 	int t_debug_id = atomic_inc_return(&binder_last_id);
+	bool wakeup_for_proc_work = false;
 
 	e = binder_transaction_log_add(&binder_transaction_log);
 	e->debug_id = t_debug_id;
@@ -2572,10 +2656,9 @@ static void binder_transaction(struct binder_proc *proc,
 	if (target_thread) {
 		e->to_thread = target_thread->pid;
 		target_list = &target_thread->todo;
-		target_wait = &target_thread->wait;
 	} else {
 		target_list = &target_proc->todo;
-		target_wait = &target_proc->wait;
+		wakeup_for_proc_work = true;
 	}
 	e->to_proc = target_proc->pid;
 
@@ -2882,7 +2965,7 @@ static void binder_transaction(struct binder_proc *proc,
 		binder_node_lock(target_node);
 		if (target_node->has_async_transaction) {
 			target_list = &target_node->async_todo;
-			target_wait = NULL;
+			wakeup_for_proc_work = false;
 		} else
 			target_node->has_async_transaction = 1;
 		/*
@@ -2901,11 +2984,13 @@ static void binder_transaction(struct binder_proc *proc,
 		binder_inner_proc_unlock(target_proc);
 		binder_node_unlock(target_node);
 	}
-	if (target_wait) {
-		if (reply || !(tr->flags & TF_ONE_WAY))
-			wake_up_interruptible_sync(target_wait);
-		else
-			wake_up_interruptible(target_wait);
+	if (target_thread) {
+		wake_up_interruptible_sync(&target_thread->wait);
+	} else if (wakeup_for_proc_work) {
+		binder_inner_proc_lock(target_proc);
+		binder_wakeup_proc_ilocked(target_proc,
+					   !(tr->flags & TF_ONE_WAY));
+		binder_inner_proc_unlock(target_proc);
 	}
 	if (target_thread)
 		binder_thread_dec_tmpref(target_thread);
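The wakeup-hint rule survives the rewrite unchanged: a reply always wakes its specific target thread synchronously, and a fresh non-oneway transaction requests a sync wakeup from binder_wakeup_proc_ilocked(), because in both cases the sender is about to block for the result. Restated as a predicate (a hypothetical condensation, not part of the patch; TF_ONE_WAY comes from the binder UAPI header):

	/* hypothetical restatement of the wakeup-hint rule */
	static bool binder_wakeup_is_sync(bool reply, __u32 txn_flags)
	{
		/* the sender blocks for a result unless the call is oneway */
		return reply || !(txn_flags & TF_ONE_WAY);
	}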
@@ -3345,12 +3430,14 @@ static int binder_thread_write(struct binder_proc *proc,
 						&ref->death->work,
 						&thread->todo);
 				else {
-					binder_enqueue_work(
-						proc,
+					binder_inner_proc_lock(proc);
+					binder_enqueue_work_ilocked(
 						&ref->death->work,
 						&proc->todo);
-					wake_up_interruptible(
-						&proc->wait);
+					binder_wakeup_proc_ilocked(
+						proc,
+						false);
+					binder_inner_proc_unlock(proc);
 				}
 			}
 		} else {
@@ -3385,8 +3472,9 @@ static int binder_thread_write(struct binder_proc *proc,
 					binder_enqueue_work_ilocked(
 						&death->work,
 						&proc->todo);
-					wake_up_interruptible(
-						&proc->wait);
+					binder_wakeup_proc_ilocked(
+						proc,
+						false);
 				}
 			} else {
 				BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
@@ -3441,7 +3529,8 @@ static int binder_thread_write(struct binder_proc *proc,
 				binder_enqueue_work_ilocked(
 					&death->work,
 					&proc->todo);
-				wake_up_interruptible(&proc->wait);
+				binder_wakeup_proc_ilocked(
+					proc, false);
 			}
 		}
 		binder_inner_proc_unlock(proc);
@@ -3468,13 +3557,6 @@ static void binder_stat_br(struct binder_proc *proc,
 	}
 }
 
-static int binder_has_proc_work(struct binder_proc *proc,
-				struct binder_thread *thread)
-{
-	return !binder_worklist_empty(proc, &proc->todo) ||
-		thread->looper_need_return;
-}
-
 static int binder_has_thread_work(struct binder_thread *thread)
 {
 	return !binder_worklist_empty(thread->proc, &thread->todo) ||
@@ -3512,6 +3594,38 @@ static int binder_put_node_cmd(struct binder_proc *proc,
 	return 0;
 }
 
+static int binder_wait_for_work(struct binder_thread *thread,
+				bool do_proc_work)
+{
+	DEFINE_WAIT(wait);
+	struct binder_proc *proc = thread->proc;
+	int ret = 0;
+
+	freezer_do_not_count();
+	binder_inner_proc_lock(proc);
+	for (;;) {
+		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
+		if (binder_has_work_ilocked(thread, do_proc_work))
+			break;
+		if (do_proc_work)
+			list_add(&thread->waiting_thread_node,
+				 &proc->waiting_threads);
+		binder_inner_proc_unlock(proc);
+		schedule();
+		binder_inner_proc_lock(proc);
+		list_del_init(&thread->waiting_thread_node);
+		if (signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+	}
+	finish_wait(&thread->wait, &wait);
+	binder_inner_proc_unlock(proc);
+	freezer_count();
+
+	return ret;
+}
+
 static int binder_thread_read(struct binder_proc *proc,
 			      struct binder_thread *thread,
 			      binder_uintptr_t binder_buffer, size_t size,
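binder_wait_for_work() open-codes the standard prepare_to_wait() idiom instead of using wait_event_freezable(), because the sleeper must also publish itself on proc->waiting_threads under the same lock producers take before waking. The generic shape of the loop, with the binder specifics stripped out (wq, lock and condition stand in for thread->wait, proc->inner_lock and binder_has_work_ilocked()):

	spin_lock(&lock);
	for (;;) {
		/* set task state before re-checking under the lock */
		prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
		if (condition)
			break;
		/* publish this waiter where the waker can find it */
		spin_unlock(&lock);
		schedule();		/* wakeup or signal resumes us */
		spin_lock(&lock);
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&wq, &wait);
	spin_unlock(&lock);

Because the task state is set before the condition is re-checked under the lock, a wakeup arriving between the check and schedule() cannot be lost. The freezer_do_not_count()/freezer_count() pair tells the freezer not to block suspend on a thread parked here.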
@@ -3532,10 +3646,7 @@ static int binder_thread_read(struct binder_proc *proc,
 
 retry:
	binder_inner_proc_lock(proc);
-	wait_for_proc_work = thread->transaction_stack == NULL &&
-		binder_worklist_empty_ilocked(&thread->todo);
-	if (wait_for_proc_work)
-		proc->ready_threads++;
+	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
 	binder_inner_proc_unlock(proc);
 
 	thread->looper |= BINDER_LOOPER_STATE_WAITING;
@@ -3552,23 +3663,15 @@ retry:
 					 binder_stop_on_user_error < 2);
 		}
 		binder_set_nice(proc->default_priority);
-		if (non_block) {
-			if (!binder_has_proc_work(proc, thread))
-				ret = -EAGAIN;
-		} else
-			ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
+	}
+
+	if (non_block) {
+		if (!binder_has_work(thread, wait_for_proc_work))
+			ret = -EAGAIN;
 	} else {
-		if (non_block) {
-			if (!binder_has_thread_work(thread))
-				ret = -EAGAIN;
-		} else
-			ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
+		ret = binder_wait_for_work(thread, wait_for_proc_work);
 	}
 
-	binder_inner_proc_lock(proc);
-	if (wait_for_proc_work)
-		proc->ready_threads--;
-	binder_inner_proc_unlock(proc);
-
 	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
 
 	if (ret)
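Both the blocking and non-blocking paths now funnel through the shared binder_has_work()/binder_wait_for_work() pair, with wait_for_proc_work computed once up front. From userspace, the non-blocking path is reached by opening the device with O_NONBLOCK; a hypothetical sketch (binder_try_read() and binder_fd are illustrative names; the ioctl and struct are from the binder UAPI header):

	#include <errno.h>
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <sys/types.h>
	#include <linux/android/binder.h>

	/* Returns bytes of commands read, 0 if no work was pending,
	 * -1 on a real error. Assumes binder_fd was opened O_NONBLOCK. */
	static ssize_t binder_try_read(int binder_fd, void *buf, size_t len)
	{
		struct binder_write_read bwr = {
			.read_buffer = (binder_uintptr_t)(uintptr_t)buf,
			.read_size = len,
		};

		if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) < 0)
			return errno == EAGAIN ? 0 : -1;
		return (ssize_t)bwr.read_consumed;
	}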
@@ -3854,7 +3957,8 @@ done:
 
 	*consumed = ptr - buffer;
 	binder_inner_proc_lock(proc);
-	if (proc->requested_threads + proc->ready_threads == 0 &&
+	if (proc->requested_threads == 0 &&
+	    list_empty(&thread->proc->waiting_threads) &&
 	    proc->requested_threads_started < proc->max_threads &&
 	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
 	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
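With the ready_threads counter gone, idleness is observable directly: proc->waiting_threads is non-empty exactly while some looper is parked in binder_wait_for_work() willing to take proc work. The BR_SPAWN_LOOPER condition above, restated as one predicate (a sketch using the same fields as the hunk):

	bool should_spawn =
		proc->requested_threads == 0 &&		/* no spawn in flight */
		list_empty(&proc->waiting_threads) &&	/* nobody idle */
		proc->requested_threads_started < proc->max_threads &&
		(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
				   BINDER_LOOPER_STATE_ENTERED));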
@@ -3965,7 +4069,7 @@ static struct binder_thread *binder_get_thread_ilocked(
 	thread->return_error.cmd = BR_OK;
 	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
 	thread->reply_error.cmd = BR_OK;
-
+	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
 	return thread;
 }
 
@@ -4078,28 +4182,24 @@ static unsigned int binder_poll(struct file *filp,
 {
 	struct binder_proc *proc = filp->private_data;
 	struct binder_thread *thread = NULL;
-	int wait_for_proc_work;
+	bool wait_for_proc_work;
 
 	thread = binder_get_thread(proc);
 
 	binder_inner_proc_lock(thread->proc);
-	wait_for_proc_work = thread->transaction_stack == NULL &&
-		binder_worklist_empty_ilocked(&thread->todo);
+	thread->looper |= BINDER_LOOPER_STATE_POLL;
+	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
+
 	binder_inner_proc_unlock(thread->proc);
 
-	if (wait_for_proc_work) {
-		if (binder_has_proc_work(proc, thread))
-			return POLLIN;
-		poll_wait(filp, &proc->wait, wait);
-		if (binder_has_proc_work(proc, thread))
-			return POLLIN;
-	} else {
-		if (binder_has_thread_work(thread))
-			return POLLIN;
-		poll_wait(filp, &thread->wait, wait);
-		if (binder_has_thread_work(thread))
-			return POLLIN;
-	}
+	if (binder_has_work(thread, wait_for_proc_work))
+		return POLLIN;
+
+	poll_wait(filp, &thread->wait, wait);
+
+	if (binder_has_thread_work(thread))
+		return POLLIN;
+
 	return 0;
 }
 
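BINDER_LOOPER_STATE_POLL is set here and never cleared: once a thread has waited on the fd via poll()/epoll it can block on thread->wait without ever appearing on proc->waiting_threads, which is precisely the case binder_wakeup_poll_threads_ilocked() covers. A hypothetical userspace sketch of that mode (binder_fd is an already-open /dev/binder descriptor; error handling omitted):

	#include <sys/epoll.h>

	static void binder_poll_loop(int binder_fd)
	{
		struct epoll_event ev = { .events = EPOLLIN };
		int epfd = epoll_create1(0);

		ev.data.fd = binder_fd;
		epoll_ctl(epfd, EPOLL_CTL_ADD, binder_fd, &ev);
		for (;;) {
			struct epoll_event out;

			if (epoll_wait(epfd, &out, 1, -1) == 1) {
				/* POLLIN: drain work with BINDER_WRITE_READ */
			}
		}
	}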
@@ -4146,8 +4246,10 @@ static int binder_ioctl_write_read(struct file *filp,
 					 &bwr.read_consumed,
 					 filp->f_flags & O_NONBLOCK);
 		trace_binder_read_done(ret);
-		if (!binder_worklist_empty(proc, &proc->todo))
-			wake_up_interruptible(&proc->wait);
+		binder_inner_proc_lock(proc);
+		if (!binder_worklist_empty_ilocked(&proc->todo))
+			binder_wakeup_proc_ilocked(proc, false);
+		binder_inner_proc_unlock(proc);
 		if (ret < 0) {
 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
 				ret = -EFAULT;
@@ -4389,7 +4491,6 @@ static int binder_open(struct inode *nodp, struct file *filp)
 	get_task_struct(current->group_leader);
 	proc->tsk = current->group_leader;
 	INIT_LIST_HEAD(&proc->todo);
-	init_waitqueue_head(&proc->wait);
 	proc->default_priority = task_nice(current);
 	binder_dev = container_of(filp->private_data, struct binder_device,
 				  miscdev);
@@ -4399,6 +4500,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
 	binder_stats_created(BINDER_STAT_PROC);
 	proc->pid = current->group_leader->pid;
 	INIT_LIST_HEAD(&proc->delivered_death);
+	INIT_LIST_HEAD(&proc->waiting_threads);
 	filp->private_data = proc;
 
 	mutex_lock(&binder_procs_lock);
@@ -4450,7 +4552,6 @@ static void binder_deferred_flush(struct binder_proc *proc)
 		}
 	}
 	binder_inner_proc_unlock(proc);
-	wake_up_interruptible_all(&proc->wait);
 
 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
 		     "binder_flush: %d woke %d threads\n", proc->pid,
@@ -4519,7 +4620,7 @@ static int binder_node_release(struct binder_node *node, int refs)
 			ref->death->work.type = BINDER_WORK_DEAD_BINDER;
 			binder_enqueue_work_ilocked(&ref->death->work,
 						    &ref->proc->todo);
-			wake_up_interruptible(&ref->proc->wait);
+			binder_wakeup_proc_ilocked(ref->proc, false);
 			binder_inner_proc_unlock(ref->proc);
 		}
 
@@ -5007,23 +5108,29 @@ static void print_binder_proc_stats(struct seq_file *m,
 				    struct binder_proc *proc)
 {
 	struct binder_work *w;
+	struct binder_thread *thread;
 	struct rb_node *n;
-	int count, strong, weak;
+	int count, strong, weak, ready_threads;
 	size_t free_async_space =
 		binder_alloc_get_free_async_space(&proc->alloc);
 
 	seq_printf(m, "proc %d\n", proc->pid);
 	seq_printf(m, "context %s\n", proc->context->name);
 	count = 0;
+	ready_threads = 0;
 	binder_inner_proc_lock(proc);
 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
 		count++;
+
+	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
+		ready_threads++;
+
 	seq_printf(m, "  threads: %d\n", count);
 	seq_printf(m, "  requested threads: %d+%d/%d\n"
 			"  ready threads %d\n"
 			"  free async space %zd\n", proc->requested_threads,
 			proc->requested_threads_started, proc->max_threads,
-			proc->ready_threads,
+			ready_threads,
 			free_async_space);
 	count = 0;
 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))