|
|
@@ -577,6 +577,8 @@ enum {
|
|
|
* (protected by @proc->inner_lock)
|
|
|
* @todo: list of work to do for this thread
|
|
|
* (protected by @proc->inner_lock)
|
|
|
+ * @process_todo: whether work in @todo should be processed
|
|
|
+ * (protected by @proc->inner_lock)
|
|
|
* @return_error: transaction errors reported by this thread
|
|
|
* (only accessed by this thread)
|
|
|
* @reply_error: transaction errors reported by target thread
|
|
|
@@ -602,6 +604,7 @@ struct binder_thread {
|
|
|
bool looper_need_return; /* can be written by other thread */
|
|
|
struct binder_transaction *transaction_stack;
|
|
|
struct list_head todo;
|
|
|
+ bool process_todo;
|
|
|
struct binder_error return_error;
|
|
|
struct binder_error reply_error;
|
|
|
wait_queue_head_t wait;
|
|
|
@@ -787,6 +790,16 @@ static bool binder_worklist_empty(struct binder_proc *proc,
|
|
|
return ret;
|
|
|
}
|
|
|
|
|
|
+/**
|
|
|
+ * binder_enqueue_work_ilocked() - Add an item to the work list
|
|
|
+ * @work: struct binder_work to add to list
|
|
|
+ * @target_list: list to add work to
|
|
|
+ *
|
|
|
+ * Adds the work to the specified list. Asserts that work
|
|
|
+ * is not already on a list.
|
|
|
+ *
|
|
|
+ * Requires the proc->inner_lock to be held.
|
|
|
+ */
|
|
|
static void
|
|
|
binder_enqueue_work_ilocked(struct binder_work *work,
|
|
|
struct list_head *target_list)
|
|
|
@@ -797,22 +810,56 @@ binder_enqueue_work_ilocked(struct binder_work *work,
|
|
|
}
|
|
|
|
|
|
/**
|
|
|
- * binder_enqueue_work() - Add an item to the work list
|
|
|
- * @proc: binder_proc associated with list
|
|
|
+ * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
|
|
|
+ * @thread: thread to queue work to
|
|
|
* @work: struct binder_work to add to list
|
|
|
- * @target_list: list to add work to
|
|
|
*
|
|
|
- * Adds the work to the specified list. Asserts that work
|
|
|
- * is not already on a list.
|
|
|
+ * Adds the work to the todo list of the thread. Doesn't set the process_todo
|
|
|
+ * flag, which means that (if it wasn't already set) the thread will go to
|
|
|
+ * sleep without handling this work when it does a blocking read.
|
|
|
+ *
|
|
|
+ * Requires the proc->inner_lock to be held.
|
|
|
*/
|
|
|
static void
|
|
|
-binder_enqueue_work(struct binder_proc *proc,
|
|
|
- struct binder_work *work,
|
|
|
- struct list_head *target_list)
|
|
|
+binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
|
|
|
+ struct binder_work *work)
|
|
|
{
|
|
|
- binder_inner_proc_lock(proc);
|
|
|
- binder_enqueue_work_ilocked(work, target_list);
|
|
|
- binder_inner_proc_unlock(proc);
|
|
|
+ binder_enqueue_work_ilocked(work, &thread->todo);
|
|
|
+}
|
|
|
+
|
|
|
+/**
|
|
|
+ * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
|
|
|
+ * @thread: thread to queue work to
|
|
|
+ * @work: struct binder_work to add to list
|
|
|
+ *
|
|
|
+ * Adds the work to the todo list of the thread, and enables processing
|
|
|
+ * of the todo queue.
|
|
|
+ *
|
|
|
+ * Requires the proc->inner_lock to be held.
|
|
|
+ */
|
|
|
+static void
|
|
|
+binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
|
|
|
+ struct binder_work *work)
|
|
|
+{
|
|
|
+ binder_enqueue_work_ilocked(work, &thread->todo);
|
|
|
+ thread->process_todo = true;
|
|
|
+}
|
|
|
+
|
|
|
+/**
|
|
|
+ * binder_enqueue_thread_work() - Add an item to the thread work list
|
|
|
+ * @thread: thread to queue work to
|
|
|
+ * @work: struct binder_work to add to list
|
|
|
+ *
|
|
|
+ * Adds the work to the todo list of the thread, and enables processing
|
|
|
+ * of the todo queue.
|
|
|
+ */
|
|
|
+static void
|
|
|
+binder_enqueue_thread_work(struct binder_thread *thread,
|
|
|
+ struct binder_work *work)
|
|
|
+{
|
|
|
+ binder_inner_proc_lock(thread->proc);
|
|
|
+ binder_enqueue_thread_work_ilocked(thread, work);
|
|
|
+ binder_inner_proc_unlock(thread->proc);
|
|
|
}
|
|
|
|
|
|
static void
|
|
|
@@ -927,7 +974,7 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
|
|
|
static bool binder_has_work_ilocked(struct binder_thread *thread,
|
|
|
bool do_proc_work)
|
|
|
{
|
|
|
- return !binder_worklist_empty_ilocked(&thread->todo) ||
|
|
|
+ return thread->process_todo ||
|
|
|
thread->looper_need_return ||
|
|
|
(do_proc_work &&
|
|
|
!binder_worklist_empty_ilocked(&thread->proc->todo));
|
|
|
@@ -1215,6 +1262,17 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
|
|
|
node->local_strong_refs++;
|
|
|
if (!node->has_strong_ref && target_list) {
|
|
|
binder_dequeue_work_ilocked(&node->work);
|
|
|
+ /*
|
|
|
+ * Note: this function is the only place where we queue
|
|
|
+ * directly to a thread->todo without using the
|
|
|
+ * corresponding binder_enqueue_thread_work() helper
|
|
|
+ * functions; in this case it's ok to not set the
|
|
|
+ * process_todo flag, since we know this node work will
|
|
|
+ * always be followed by other work that starts queue
|
|
|
+ * processing: in case of synchronous transactions, a
|
|
|
+ * BR_REPLY or BR_ERROR; in case of oneway
|
|
|
+ * transactions, a BR_TRANSACTION_COMPLETE.
|
|
|
+ */
|
|
|
binder_enqueue_work_ilocked(&node->work, target_list);
|
|
|
}
|
|
|
} else {
|
|
|
@@ -1226,6 +1284,9 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
|
|
|
node->debug_id);
|
|
|
return -EINVAL;
|
|
|
}
|
|
|
+ /*
|
|
|
+ * As in the strong-ref case above: not setting process_todo is OK here too.
|
|
|
+ */
|
|
|
binder_enqueue_work_ilocked(&node->work, target_list);
|
|
|
}
|
|
|
}
|
|
|
@@ -1915,9 +1976,9 @@ static void binder_send_failed_reply(struct binder_transaction *t,
|
|
|
binder_pop_transaction_ilocked(target_thread, t);
|
|
|
if (target_thread->reply_error.cmd == BR_OK) {
|
|
|
target_thread->reply_error.cmd = error_code;
|
|
|
- binder_enqueue_work_ilocked(
|
|
|
- &target_thread->reply_error.work,
|
|
|
- &target_thread->todo);
|
|
|
+ binder_enqueue_thread_work_ilocked(
|
|
|
+ target_thread,
|
|
|
+ &target_thread->reply_error.work);
|
|
|
wake_up_interruptible(&target_thread->wait);
|
|
|
} else {
|
|
|
WARN(1, "Unexpected reply error: %u\n",
|
|
|
@@ -2536,18 +2597,16 @@ static bool binder_proc_transaction(struct binder_transaction *t,
|
|
|
struct binder_proc *proc,
|
|
|
struct binder_thread *thread)
|
|
|
{
|
|
|
- struct list_head *target_list = NULL;
|
|
|
struct binder_node *node = t->buffer->target_node;
|
|
|
bool oneway = !!(t->flags & TF_ONE_WAY);
|
|
|
- bool wakeup = true;
|
|
|
+ bool pending_async = false;
|
|
|
|
|
|
BUG_ON(!node);
|
|
|
binder_node_lock(node);
|
|
|
if (oneway) {
|
|
|
BUG_ON(thread);
|
|
|
if (node->has_async_transaction) {
|
|
|
- target_list = &node->async_todo;
|
|
|
- wakeup = false;
|
|
|
+ pending_async = true;
|
|
|
} else {
|
|
|
node->has_async_transaction = 1;
|
|
|
}
|
|
|
@@ -2561,19 +2620,17 @@ static bool binder_proc_transaction(struct binder_transaction *t,
|
|
|
return false;
|
|
|
}
|
|
|
|
|
|
- if (!thread && !target_list)
|
|
|
+ if (!thread && !pending_async)
|
|
|
thread = binder_select_thread_ilocked(proc);
|
|
|
|
|
|
if (thread)
|
|
|
- target_list = &thread->todo;
|
|
|
- else if (!target_list)
|
|
|
- target_list = &proc->todo;
|
|
|
+ binder_enqueue_thread_work_ilocked(thread, &t->work);
|
|
|
+ else if (!pending_async)
|
|
|
+ binder_enqueue_work_ilocked(&t->work, &proc->todo);
|
|
|
else
|
|
|
- BUG_ON(target_list != &node->async_todo);
|
|
|
-
|
|
|
- binder_enqueue_work_ilocked(&t->work, target_list);
|
|
|
+ binder_enqueue_work_ilocked(&t->work, &node->async_todo);
|
|
|
|
|
|
- if (wakeup)
|
|
|
+ if (!pending_async)
|
|
|
binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
|
|
|
|
|
|
binder_inner_proc_unlock(proc);
|
|
|
@@ -3068,10 +3125,10 @@ static void binder_transaction(struct binder_proc *proc,
|
|
|
}
|
|
|
}
|
|
|
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
|
|
|
- binder_enqueue_work(proc, tcomplete, &thread->todo);
|
|
|
t->work.type = BINDER_WORK_TRANSACTION;
|
|
|
|
|
|
if (reply) {
|
|
|
+ binder_enqueue_thread_work(thread, tcomplete);
|
|
|
binder_inner_proc_lock(target_proc);
|
|
|
if (target_thread->is_dead) {
|
|
|
binder_inner_proc_unlock(target_proc);
|
|
|
@@ -3079,13 +3136,21 @@ static void binder_transaction(struct binder_proc *proc,
|
|
|
}
|
|
|
BUG_ON(t->buffer->async_transaction != 0);
|
|
|
binder_pop_transaction_ilocked(target_thread, in_reply_to);
|
|
|
- binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
|
|
|
+ binder_enqueue_thread_work_ilocked(target_thread, &t->work);
|
|
|
binder_inner_proc_unlock(target_proc);
|
|
|
wake_up_interruptible_sync(&target_thread->wait);
|
|
|
binder_free_transaction(in_reply_to);
|
|
|
} else if (!(t->flags & TF_ONE_WAY)) {
|
|
|
BUG_ON(t->buffer->async_transaction != 0);
|
|
|
binder_inner_proc_lock(proc);
|
|
|
+ /*
|
|
|
+ * Defer the TRANSACTION_COMPLETE, so we don't return to
|
|
|
+ * userspace immediately; this allows the target process to
|
|
|
+ * immediately start processing this transaction, reducing
|
|
|
+ * latency. We will then return the TRANSACTION_COMPLETE when
|
|
|
+ * the target replies (or there is an error).
|
|
|
+ */
|
|
|
+ binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
|
|
|
t->need_reply = 1;
|
|
|
t->from_parent = thread->transaction_stack;
|
|
|
thread->transaction_stack = t;
|
|
|
@@ -3099,6 +3164,7 @@ static void binder_transaction(struct binder_proc *proc,
|
|
|
} else {
|
|
|
BUG_ON(target_node == NULL);
|
|
|
BUG_ON(t->buffer->async_transaction != 1);
|
|
|
+ binder_enqueue_thread_work(thread, tcomplete);
|
|
|
if (!binder_proc_transaction(t, target_proc, NULL))
|
|
|
goto err_dead_proc_or_thread;
|
|
|
}
|
|
|
@@ -3177,15 +3243,11 @@ err_invalid_target_handle:
|
|
|
BUG_ON(thread->return_error.cmd != BR_OK);
|
|
|
if (in_reply_to) {
|
|
|
thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
|
|
|
- binder_enqueue_work(thread->proc,
|
|
|
- &thread->return_error.work,
|
|
|
- &thread->todo);
|
|
|
+ binder_enqueue_thread_work(thread, &thread->return_error.work);
|
|
|
binder_send_failed_reply(in_reply_to, return_error);
|
|
|
} else {
|
|
|
thread->return_error.cmd = return_error;
|
|
|
- binder_enqueue_work(thread->proc,
|
|
|
- &thread->return_error.work,
|
|
|
- &thread->todo);
|
|
|
+ binder_enqueue_thread_work(thread, &thread->return_error.work);
|
|
|
}
|
|
|
}
|
|
|
|
|
|
@@ -3489,10 +3551,9 @@ static int binder_thread_write(struct binder_proc *proc,
|
|
|
WARN_ON(thread->return_error.cmd !=
|
|
|
BR_OK);
|
|
|
thread->return_error.cmd = BR_ERROR;
|
|
|
- binder_enqueue_work(
|
|
|
- thread->proc,
|
|
|
- &thread->return_error.work,
|
|
|
- &thread->todo);
|
|
|
+ binder_enqueue_thread_work(
|
|
|
+ thread,
|
|
|
+ &thread->return_error.work);
|
|
|
binder_debug(
|
|
|
BINDER_DEBUG_FAILED_TRANSACTION,
|
|
|
"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
|
|
|
@@ -3572,9 +3633,9 @@ static int binder_thread_write(struct binder_proc *proc,
|
|
|
if (thread->looper &
|
|
|
(BINDER_LOOPER_STATE_REGISTERED |
|
|
|
BINDER_LOOPER_STATE_ENTERED))
|
|
|
- binder_enqueue_work_ilocked(
|
|
|
- &death->work,
|
|
|
- &thread->todo);
|
|
|
+ binder_enqueue_thread_work_ilocked(
|
|
|
+ thread,
|
|
|
+ &death->work);
|
|
|
else {
|
|
|
binder_enqueue_work_ilocked(
|
|
|
&death->work,
|
|
|
@@ -3629,8 +3690,8 @@ static int binder_thread_write(struct binder_proc *proc,
|
|
|
if (thread->looper &
|
|
|
(BINDER_LOOPER_STATE_REGISTERED |
|
|
|
BINDER_LOOPER_STATE_ENTERED))
|
|
|
- binder_enqueue_work_ilocked(
|
|
|
- &death->work, &thread->todo);
|
|
|
+ binder_enqueue_thread_work_ilocked(
|
|
|
+ thread, &death->work);
|
|
|
else {
|
|
|
binder_enqueue_work_ilocked(
|
|
|
&death->work,
|
|
|
@@ -3804,6 +3865,8 @@ retry:
|
|
|
break;
|
|
|
}
|
|
|
w = binder_dequeue_work_head_ilocked(list);
|
|
|
+ if (binder_worklist_empty_ilocked(&thread->todo))
|
|
|
+ thread->process_todo = false;
|
|
|
|
|
|
switch (w->type) {
|
|
|
case BINDER_WORK_TRANSACTION: {
|