@@ -71,6 +71,7 @@
 #include <linux/security.h>
 #include <linux/spinlock.h>
 #include <linux/ratelimit.h>
+#include <linux/syscalls.h>
 
 #include <uapi/linux/android/binder.h>
 
@@ -457,9 +458,8 @@ struct binder_ref {
 };
 
 enum binder_deferred_state {
-	BINDER_DEFERRED_PUT_FILES = 0x01,
-	BINDER_DEFERRED_FLUSH = 0x02,
-	BINDER_DEFERRED_RELEASE = 0x04,
+	BINDER_DEFERRED_FLUSH = 0x01,
+	BINDER_DEFERRED_RELEASE = 0x02,
 };
 
 /**
@@ -480,9 +480,6 @@ enum binder_deferred_state {
  *                        (invariant after initialized)
  * @tsk                   task_struct for group_leader of process
  *                        (invariant after initialized)
- * @files                 files_struct for process
- *                        (protected by @files_lock)
- * @files_lock            mutex to protect @files
  * @deferred_work_node:   element for binder_deferred_list
  *                        (protected by binder_deferred_lock)
  * @deferred_work:        bitmap of deferred work to perform
@@ -527,8 +524,6 @@ struct binder_proc {
 	struct list_head waiting_threads;
 	int pid;
 	struct task_struct *tsk;
-	struct files_struct *files;
-	struct mutex files_lock;
 	struct hlist_node deferred_work_node;
 	int deferred_work;
 	bool is_dead;
@@ -611,6 +606,23 @@ struct binder_thread {
 	bool is_dead;
 };
 
+/**
+ * struct binder_txn_fd_fixup - transaction fd fixup list element
+ * @fixup_entry:          list entry
+ * @file:                 struct file to be associated with new fd
+ * @offset:               offset in buffer data to this fixup
+ *
+ * List element for fd fixups in a transaction. Since file
+ * descriptors need to be allocated in the context of the
+ * target process, we pass each fd to be processed in this
+ * struct.
+ */
+struct binder_txn_fd_fixup {
+	struct list_head fixup_entry;
+	struct file *file;
+	size_t offset;
+};
+
 struct binder_transaction {
 	int debug_id;
 	struct binder_work work;
@@ -628,6 +640,7 @@ struct binder_transaction {
 	long priority;
 	long saved_priority;
 	kuid_t sender_euid;
+	struct list_head fd_fixups;
 	/**
 	 * @lock: protects @from, @to_proc, and @to_thread
 	 *
@@ -822,6 +835,7 @@ static void
 binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
 					    struct binder_work *work)
 {
+	WARN_ON(!list_empty(&thread->waiting_thread_node));
 	binder_enqueue_work_ilocked(work, &thread->todo);
 }
 
@@ -839,6 +853,7 @@ static void
 binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
 				   struct binder_work *work)
 {
+	WARN_ON(!list_empty(&thread->waiting_thread_node));
 	binder_enqueue_work_ilocked(work, &thread->todo);
 	thread->process_todo = true;
 }
@@ -920,66 +935,6 @@ static void binder_free_thread(struct binder_thread *thread);
 static void binder_free_proc(struct binder_proc *proc);
 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
 
-static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
-{
-	unsigned long rlim_cur;
-	unsigned long irqs;
-	int ret;
-
-	mutex_lock(&proc->files_lock);
-	if (proc->files == NULL) {
-		ret = -ESRCH;
-		goto err;
-	}
-	if (!lock_task_sighand(proc->tsk, &irqs)) {
-		ret = -EMFILE;
-		goto err;
-	}
-	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
-	unlock_task_sighand(proc->tsk, &irqs);
-
-	ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
-err:
-	mutex_unlock(&proc->files_lock);
-	return ret;
-}
-
-/*
- * copied from fd_install
- */
-static void task_fd_install(
-	struct binder_proc *proc, unsigned int fd, struct file *file)
-{
-	mutex_lock(&proc->files_lock);
-	if (proc->files)
-		__fd_install(proc->files, fd, file);
-	mutex_unlock(&proc->files_lock);
-}
-
-/*
- * copied from sys_close
- */
-static long task_close_fd(struct binder_proc *proc, unsigned int fd)
-{
-	int retval;
-
-	mutex_lock(&proc->files_lock);
-	if (proc->files == NULL) {
-		retval = -ESRCH;
-		goto err;
-	}
-	retval = __close_fd(proc->files, fd);
-	/* can't restart close syscall because file table entry was cleared */
-	if (unlikely(retval == -ERESTARTSYS ||
-		     retval == -ERESTARTNOINTR ||
-		     retval == -ERESTARTNOHAND ||
-		     retval == -ERESTART_RESTARTBLOCK))
-		retval = -EINTR;
-err:
-	mutex_unlock(&proc->files_lock);
-	return retval;
-}
-
 static bool binder_has_work_ilocked(struct binder_thread *thread,
 				    bool do_proc_work)
 {
@@ -1270,19 +1225,12 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
 		} else
 			node->local_strong_refs++;
 		if (!node->has_strong_ref && target_list) {
+			struct binder_thread *thread = container_of(target_list,
+						   struct binder_thread, todo);
 			binder_dequeue_work_ilocked(&node->work);
-			/*
-			 * Note: this function is the only place where we queue
-			 * directly to a thread->todo without using the
-			 * corresponding binder_enqueue_thread_work() helper
-			 * functions; in this case it's ok to not set the
-			 * process_todo flag, since we know this node work will
-			 * always be followed by other work that starts queue
-			 * processing: in case of synchronous transactions, a
-			 * BR_REPLY or BR_ERROR; in case of oneway
-			 * transactions, a BR_TRANSACTION_COMPLETE.
-			 */
-			binder_enqueue_work_ilocked(&node->work, target_list);
+			BUG_ON(&thread->todo != target_list);
+			binder_enqueue_deferred_thread_work_ilocked(thread,
+								    &node->work);
 		}
 	} else {
 		if (!internal)
@@ -1958,10 +1906,32 @@ static struct binder_thread *binder_get_txn_from_and_acq_inner(
 	return NULL;
 }
 
+/**
+ * binder_free_txn_fixups() - free unprocessed fd fixups
+ * @t:	binder transaction for t->from
+ *
+ * If the transaction is being torn down prior to being
+ * processed by the target process, free all of the
+ * fd fixups and fput the file structs. It is safe to
+ * call this function after the fixups have been
+ * processed -- in that case, the list will be empty.
+ */
+static void binder_free_txn_fixups(struct binder_transaction *t)
+{
+	struct binder_txn_fd_fixup *fixup, *tmp;
+
+	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
+		fput(fixup->file);
+		list_del(&fixup->fixup_entry);
+		kfree(fixup);
+	}
+}
+
 static void binder_free_transaction(struct binder_transaction *t)
 {
 	if (t->buffer)
 		t->buffer->transaction = NULL;
+	binder_free_txn_fixups(t);
 	kfree(t);
 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
 }
@@ -2262,12 +2232,17 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
 		} break;
 
 		case BINDER_TYPE_FD: {
-			struct binder_fd_object *fp = to_binder_fd_object(hdr);
-
-			binder_debug(BINDER_DEBUG_TRANSACTION,
-				     "        fd %d\n", fp->fd);
-			if (failed_at)
-				task_close_fd(proc, fp->fd);
+			/*
+			 * No need to close the file here since user-space
+			 * closes it for successfully delivered
+			 * transactions. For transactions that weren't
+			 * delivered, the new fd was never allocated so
+			 * there is no need to close and the fput on the
+			 * file is done when the transaction is torn
+			 * down.
+			 */
+			WARN_ON(failed_at &&
+				proc->tsk == current->group_leader);
 		} break;
 		case BINDER_TYPE_PTR:
 			/*
@@ -2283,6 +2258,15 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
 			size_t fd_index;
 			binder_size_t fd_buf_size;
 
+			if (proc->tsk != current->group_leader) {
+				/*
+				 * Nothing to do if running in sender context.
+				 * The fd fixups have not been applied so no
+				 * fds need to be closed.
+				 */
+				continue;
+			}
+
 			fda = to_binder_fd_array_object(hdr);
 			parent = binder_validate_ptr(buffer, fda->parent,
 						     off_start,
@@ -2315,7 +2299,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
 			}
 			fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
 			for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
-				task_close_fd(proc, fd_array[fd_index]);
+				ksys_close(fd_array[fd_index]);
 		} break;
 		default:
 			pr_err("transaction release %d bad object type %x\n",
@@ -2447,17 +2431,18 @@ done:
 	return ret;
 }
 
-static int binder_translate_fd(int fd,
+static int binder_translate_fd(u32 *fdp,
 			       struct binder_transaction *t,
 			       struct binder_thread *thread,
 			       struct binder_transaction *in_reply_to)
 {
 	struct binder_proc *proc = thread->proc;
 	struct binder_proc *target_proc = t->to_proc;
-	int target_fd;
+	struct binder_txn_fd_fixup *fixup;
 	struct file *file;
-	int ret;
+	int ret = 0;
 	bool target_allows_fd;
+	int fd = *fdp;
 
 	if (in_reply_to)
 		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
@@ -2485,19 +2470,24 @@ static int binder_translate_fd(int fd,
 		goto err_security;
 	}
 
-	target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
-	if (target_fd < 0) {
+	/*
+	 * Add fixup record for this transaction. The allocation
+	 * of the fd in the target needs to be done from a
+	 * target thread.
+	 */
+	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
+	if (!fixup) {
 		ret = -ENOMEM;
-		goto err_get_unused_fd;
+		goto err_alloc;
 	}
-	task_fd_install(target_proc, target_fd, file);
-	trace_binder_transaction_fd(t, fd, target_fd);
-	binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
-		     fd, target_fd);
+	fixup->file = file;
+	fixup->offset = (uintptr_t)fdp - (uintptr_t)t->buffer->data;
+	trace_binder_transaction_fd_send(t, fd, fixup->offset);
+	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
 
-	return target_fd;
+	return ret;
 
-err_get_unused_fd:
+err_alloc:
 err_security:
 	fput(file);
 err_fget:
@@ -2511,8 +2501,7 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
 				     struct binder_thread *thread,
 				     struct binder_transaction *in_reply_to)
 {
-	binder_size_t fdi, fd_buf_size, num_installed_fds;
-	int target_fd;
+	binder_size_t fdi, fd_buf_size;
 	uintptr_t parent_buffer;
 	u32 *fd_array;
 	struct binder_proc *proc = thread->proc;
@@ -2544,23 +2533,12 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
 		return -EINVAL;
 	}
 	for (fdi = 0; fdi < fda->num_fds; fdi++) {
-		target_fd = binder_translate_fd(fd_array[fdi], t, thread,
+		int ret = binder_translate_fd(&fd_array[fdi], t, thread,
 						in_reply_to);
-		if (target_fd < 0)
-			goto err_translate_fd_failed;
-		fd_array[fdi] = target_fd;
+		if (ret < 0)
+			return ret;
 	}
 	return 0;
-
-err_translate_fd_failed:
-	/*
-	 * Failed to allocate fd or security error, free fds
-	 * installed so far.
-	 */
-	num_installed_fds = fdi;
-	for (fdi = 0; fdi < num_installed_fds; fdi++)
-		task_close_fd(target_proc, fd_array[fdi]);
-	return target_fd;
 }
 
 static int binder_fixup_parent(struct binder_transaction *t,
@@ -2723,6 +2701,7 @@ static void binder_transaction(struct binder_proc *proc,
 {
 	int ret;
 	struct binder_transaction *t;
+	struct binder_work *w;
 	struct binder_work *tcomplete;
 	binder_size_t *offp, *off_end, *off_start;
 	binder_size_t off_min;
@@ -2864,6 +2843,29 @@ static void binder_transaction(struct binder_proc *proc,
 			goto err_invalid_target_handle;
 		}
 		binder_inner_proc_lock(proc);
+
+		w = list_first_entry_or_null(&thread->todo,
+					     struct binder_work, entry);
+		if (!(tr->flags & TF_ONE_WAY) && w &&
+		    w->type == BINDER_WORK_TRANSACTION) {
+			/*
+			 * Do not allow new outgoing transaction from a
+			 * thread that has a transaction at the head of
+			 * its todo list. Only need to check the head
+			 * because binder_select_thread_ilocked picks a
+			 * thread from proc->waiting_threads to enqueue
+			 * the transaction, and nothing is queued to the
+			 * todo list while the thread is on waiting_threads.
+			 */
+			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
+					  proc->pid, thread->pid);
+			binder_inner_proc_unlock(proc);
+			return_error = BR_FAILED_REPLY;
+			return_error_param = -EPROTO;
+			return_error_line = __LINE__;
+			goto err_bad_todo_list;
+		}
+
 		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
 			struct binder_transaction *tmp;
 
@@ -2911,6 +2913,7 @@ static void binder_transaction(struct binder_proc *proc,
 		return_error_line = __LINE__;
 		goto err_alloc_t_failed;
 	}
+	INIT_LIST_HEAD(&t->fd_fixups);
 	binder_stats_created(BINDER_STAT_TRANSACTION);
 	spin_lock_init(&t->lock);
 
@@ -3066,17 +3069,16 @@ static void binder_transaction(struct binder_proc *proc,
 
 		case BINDER_TYPE_FD: {
 			struct binder_fd_object *fp = to_binder_fd_object(hdr);
-			int target_fd = binder_translate_fd(fp->fd, t, thread,
-							    in_reply_to);
+			int ret = binder_translate_fd(&fp->fd, t, thread,
+						      in_reply_to);
 
-			if (target_fd < 0) {
+			if (ret < 0) {
 				return_error = BR_FAILED_REPLY;
-				return_error_param = target_fd;
+				return_error_param = ret;
 				return_error_line = __LINE__;
 				goto err_translate_failed;
 			}
 			fp->pad_binder = 0;
-			fp->fd = target_fd;
 		} break;
 		case BINDER_TYPE_FDA: {
 			struct binder_fd_array_object *fda =
@@ -3233,6 +3235,7 @@ err_bad_object_type:
 err_bad_offset:
 err_bad_parent:
 err_copy_data_failed:
+	binder_free_txn_fixups(t);
 	trace_binder_transaction_failed_buffer_release(t->buffer);
 	binder_transaction_buffer_release(target_proc, t->buffer, offp);
 	if (target_node)
@@ -3247,6 +3250,7 @@ err_alloc_tcomplete_failed:
 	kfree(t);
 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
 err_alloc_t_failed:
+err_bad_todo_list:
 err_bad_call_stack:
 err_empty_call_stack:
 err_dead_binder:
@@ -3294,6 +3298,47 @@ err_invalid_target_handle:
 	}
 }
 
+/**
+ * binder_free_buf() - free the specified buffer
+ * @proc:	binder proc that owns buffer
+ * @buffer:	buffer to be freed
+ *
+ * If buffer for an async transaction, enqueue the next async
+ * transaction from the node.
+ *
+ * Cleanup buffer and free it.
+ */
+static void
+binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
+{
+	if (buffer->transaction) {
+		buffer->transaction->buffer = NULL;
+		buffer->transaction = NULL;
+	}
+	if (buffer->async_transaction && buffer->target_node) {
+		struct binder_node *buf_node;
+		struct binder_work *w;
+
+		buf_node = buffer->target_node;
+		binder_node_inner_lock(buf_node);
+		BUG_ON(!buf_node->has_async_transaction);
+		BUG_ON(buf_node->proc != proc);
+		w = binder_dequeue_work_head_ilocked(
+				&buf_node->async_todo);
+		if (!w) {
+			buf_node->has_async_transaction = false;
+		} else {
+			binder_enqueue_work_ilocked(
+					w, &proc->todo);
+			binder_wakeup_proc_ilocked(proc);
+		}
+		binder_node_inner_unlock(buf_node);
+	}
+	trace_binder_transaction_buffer_release(buffer);
+	binder_transaction_buffer_release(proc, buffer, NULL);
+	binder_alloc_free_buf(&proc->alloc, buffer);
+}
+
 static int binder_thread_write(struct binder_proc *proc,
 			struct binder_thread *thread,
 			binder_uintptr_t binder_buffer, size_t size,
@@ -3480,33 +3525,7 @@ static int binder_thread_write(struct binder_proc *proc,
 				     proc->pid, thread->pid, (u64)data_ptr,
 				     buffer->debug_id,
 				     buffer->transaction ? "active" : "finished");
-
-			if (buffer->transaction) {
-				buffer->transaction->buffer = NULL;
-				buffer->transaction = NULL;
-			}
-			if (buffer->async_transaction && buffer->target_node) {
-				struct binder_node *buf_node;
-				struct binder_work *w;
-
-				buf_node = buffer->target_node;
-				binder_node_inner_lock(buf_node);
-				BUG_ON(!buf_node->has_async_transaction);
-				BUG_ON(buf_node->proc != proc);
-				w = binder_dequeue_work_head_ilocked(
-						&buf_node->async_todo);
-				if (!w) {
-					buf_node->has_async_transaction = false;
-				} else {
-					binder_enqueue_work_ilocked(
-							w, &proc->todo);
-					binder_wakeup_proc_ilocked(proc);
-				}
-				binder_node_inner_unlock(buf_node);
-			}
-			trace_binder_transaction_buffer_release(buffer);
-			binder_transaction_buffer_release(proc, buffer, NULL);
-			binder_alloc_free_buf(&proc->alloc, buffer);
+			binder_free_buf(proc, buffer);
 			break;
 		}
 
@@ -3829,6 +3848,76 @@ static int binder_wait_for_work(struct binder_thread *thread,
 	return ret;
 }
 
+/**
+ * binder_apply_fd_fixups() - finish fd translation
+ * @t:	binder transaction with list of fd fixups
+ *
+ * Now that we are in the context of the transaction target
+ * process, we can allocate and install fds. Process the
+ * list of fds to translate and fixup the buffer with the
+ * new fds.
+ *
+ * If we fail to allocate an fd, then free the resources by
+ * fput'ing files that have not been processed and ksys_close'ing
+ * any fds that have already been allocated.
+ */
+static int binder_apply_fd_fixups(struct binder_transaction *t)
+{
+	struct binder_txn_fd_fixup *fixup, *tmp;
+	int ret = 0;
+
+	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
+		int fd = get_unused_fd_flags(O_CLOEXEC);
+		u32 *fdp;
+
+		if (fd < 0) {
+			binder_debug(BINDER_DEBUG_TRANSACTION,
+				     "failed fd fixup txn %d fd %d\n",
+				     t->debug_id, fd);
+			ret = -ENOMEM;
+			break;
+		}
+		binder_debug(BINDER_DEBUG_TRANSACTION,
+			     "fd fixup txn %d fd %d\n",
+			     t->debug_id, fd);
+		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
+		fd_install(fd, fixup->file);
+		fixup->file = NULL;
+		fdp = (u32 *)(t->buffer->data + fixup->offset);
+		/*
+		 * This store can cause problems for CPUs with a
+		 * VIVT cache (eg ARMv5) since the cache cannot
+		 * detect virtual aliases to the same physical cacheline.
+		 * To support VIVT, this address and the user-space VA
+		 * would both need to be flushed. Since this kernel
+		 * VA is not constructed via page_to_virt(), we can't
+		 * use flush_dcache_page() on it, so we'd have to use
+		 * an internal function. If devices with VIVT ever
+		 * need to run Android, we'll either need to go back
+		 * to patching the translated fd from the sender side
+		 * (using the non-standard kernel functions), or rework
+		 * how the kernel uses the buffer to use page_to_virt()
+		 * addresses instead of allocating in our own vm area.
+		 *
+		 * For now, we disable compilation if CONFIG_CPU_CACHE_VIVT.
+		 */
+		*fdp = fd;
+	}
+	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
+		if (fixup->file) {
+			fput(fixup->file);
+		} else if (ret) {
+			u32 *fdp = (u32 *)(t->buffer->data + fixup->offset);
+
+			ksys_close(*fdp);
+		}
+		list_del(&fixup->fixup_entry);
+		kfree(fixup);
+	}
+
+	return ret;
+}
+
 static int binder_thread_read(struct binder_proc *proc,
 			      struct binder_thread *thread,
 			      binder_uintptr_t binder_buffer, size_t size,
@@ -4110,6 +4199,34 @@ retry:
 			tr.sender_pid = 0;
 		}
 
+		ret = binder_apply_fd_fixups(t);
+		if (ret) {
+			struct binder_buffer *buffer = t->buffer;
+			bool oneway = !!(t->flags & TF_ONE_WAY);
+			int tid = t->debug_id;
+
+			if (t_from)
+				binder_thread_dec_tmpref(t_from);
+			buffer->transaction = NULL;
+			binder_cleanup_transaction(t, "fd fixups failed",
+						   BR_FAILED_REPLY);
+			binder_free_buf(proc, buffer);
+			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
+				     proc->pid, thread->pid,
+				     oneway ? "async " :
+					(cmd == BR_REPLY ? "reply " : ""),
+				     tid, BR_FAILED_REPLY, ret, __LINE__);
+			if (cmd == BR_REPLY) {
+				cmd = BR_FAILED_REPLY;
+				if (put_user(cmd, (uint32_t __user *)ptr))
+					return -EFAULT;
+				ptr += sizeof(uint32_t);
+				binder_stat_br(proc, thread, cmd);
+				break;
+			}
+			continue;
+		}
 		tr.data_size = t->buffer->data_size;
 		tr.offsets_size = t->buffer->offsets_size;
 		tr.data.ptr.buffer = (binder_uintptr_t)
@@ -4544,6 +4661,42 @@ out:
 	return ret;
 }
 
+static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
+		struct binder_node_info_for_ref *info)
+{
+	struct binder_node *node;
+	struct binder_context *context = proc->context;
+	__u32 handle = info->handle;
+
+	if (info->strong_count || info->weak_count || info->reserved1 ||
+	    info->reserved2 || info->reserved3) {
+		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
+				  proc->pid);
+		return -EINVAL;
+	}
+
+	/* This ioctl may only be used by the context manager */
+	mutex_lock(&context->context_mgr_node_lock);
+	if (!context->binder_context_mgr_node ||
+	    context->binder_context_mgr_node->proc != proc) {
+		mutex_unlock(&context->context_mgr_node_lock);
+		return -EPERM;
+	}
+	mutex_unlock(&context->context_mgr_node_lock);
+
+	node = binder_get_node_from_ref(proc, handle, true, NULL);
+	if (!node)
+		return -EINVAL;
+
+	info->strong_count = node->local_strong_refs +
+		node->internal_strong_refs;
+	info->weak_count = node->local_weak_refs;
+
+	binder_put_node(node);
+
+	return 0;
+}
+
 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
 				struct binder_node_debug_info *info)
 {
@@ -4638,6 +4791,25 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		}
 		break;
 	}
+	case BINDER_GET_NODE_INFO_FOR_REF: {
+		struct binder_node_info_for_ref info;
+
+		if (copy_from_user(&info, ubuf, sizeof(info))) {
+			ret = -EFAULT;
+			goto err;
+		}
+
+		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
+		if (ret < 0)
+			goto err;
+
+		if (copy_to_user(ubuf, &info, sizeof(info))) {
+			ret = -EFAULT;
+			goto err;
+		}
+
+		break;
+	}
 	case BINDER_GET_NODE_DEBUG_INFO: {
 		struct binder_node_debug_info info;
 
@@ -4693,7 +4865,6 @@ static void binder_vma_close(struct vm_area_struct *vma)
 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
 		     (unsigned long)pgprot_val(vma->vm_page_prot));
 	binder_alloc_vma_close(&proc->alloc);
-	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
 }
 
 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
@@ -4739,9 +4910,6 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
 	if (ret)
 		return ret;
-	mutex_lock(&proc->files_lock);
-	proc->files = get_files_struct(current);
-	mutex_unlock(&proc->files_lock);
 	return 0;
 
 err_bad_arg:
@@ -4765,7 +4933,6 @@ static int binder_open(struct inode *nodp, struct file *filp)
 	spin_lock_init(&proc->outer_lock);
 	get_task_struct(current->group_leader);
 	proc->tsk = current->group_leader;
-	mutex_init(&proc->files_lock);
 	INIT_LIST_HEAD(&proc->todo);
 	proc->default_priority = task_nice(current);
 	binder_dev = container_of(filp->private_data, struct binder_device,
@@ -4915,8 +5082,6 @@ static void binder_deferred_release(struct binder_proc *proc)
 	struct rb_node *n;
 	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
 
-	BUG_ON(proc->files);
-
 	mutex_lock(&binder_procs_lock);
 	hlist_del(&proc->proc_node);
 	mutex_unlock(&binder_procs_lock);
@@ -4998,7 +5163,6 @@ static void binder_deferred_release(struct binder_proc *proc)
 static void binder_deferred_func(struct work_struct *work)
 {
 	struct binder_proc *proc;
-	struct files_struct *files;
 
 	int defer;
 
@@ -5016,23 +5180,11 @@ static void binder_deferred_func(struct work_struct *work)
 		}
 		mutex_unlock(&binder_deferred_lock);
 
-		files = NULL;
-		if (defer & BINDER_DEFERRED_PUT_FILES) {
-			mutex_lock(&proc->files_lock);
-			files = proc->files;
-			if (files)
-				proc->files = NULL;
-			mutex_unlock(&proc->files_lock);
-		}
-
 		if (defer & BINDER_DEFERRED_FLUSH)
 			binder_deferred_flush(proc);
 
 		if (defer & BINDER_DEFERRED_RELEASE)
 			binder_deferred_release(proc); /* frees proc */
-
-		if (files)
-			put_files_struct(files);
 	} while (proc);
 }
 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
@@ -5667,12 +5819,11 @@ static int __init binder_init(void)
 	 * Copy the module_parameter string, because we don't want to
 	 * tokenize it in-place.
 	 */
-	device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
+	device_names = kstrdup(binder_devices_param, GFP_KERNEL);
 	if (!device_names) {
 		ret = -ENOMEM;
 		goto err_alloc_device_names_failed;
 	}
-	strcpy(device_names, binder_devices_param);
 
 	device_tmp = device_names;
 	while ((device_name = strsep(&device_tmp, ","))) {