blob: ca7d866b89e8063b1db7ba195f03b0c2c2d186be [file] [log] [blame]
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001/* binder.c
2 *
3 * Android IPC Subsystem
4 *
5 * Copyright (C) 2007-2008 Google, Inc.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
Anmol Sarma56b468f2012-10-30 22:35:43 +053018#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090020#include <asm/cacheflush.h>
21#include <linux/fdtable.h>
22#include <linux/file.h>
Colin Crosse2610b22013-05-06 23:50:15 +000023#include <linux/freezer.h>
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090024#include <linux/fs.h>
25#include <linux/list.h>
26#include <linux/miscdevice.h>
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090027#include <linux/module.h>
28#include <linux/mutex.h>
29#include <linux/nsproxy.h>
30#include <linux/poll.h>
Arve Hjønnevåg16b66552009-04-28 20:57:50 -070031#include <linux/debugfs.h>
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090032#include <linux/rbtree.h>
Ingo Molnar3f07c012017-02-08 18:51:30 +010033#include <linux/sched/signal.h>
Ingo Molnar6e84f312017-02-08 18:51:29 +010034#include <linux/sched/mm.h>
Arve Hjønnevåg5249f482009-04-28 20:57:50 -070035#include <linux/seq_file.h>
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090036#include <linux/uaccess.h>
Eric W. Biederman17cf22c2010-03-02 14:51:53 -080037#include <linux/pid_namespace.h>
Stephen Smalley79af7302015-01-21 10:54:10 -050038#include <linux/security.h>
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090039
Greg Kroah-Hartman9246a4a2014-10-16 15:26:51 +020040#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
41#define BINDER_IPC_32BIT 1
42#endif
43
44#include <uapi/linux/android/binder.h>
Todd Kjos0c972a02017-06-29 12:01:41 -070045#include "binder_alloc.h"
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -070046#include "binder_trace.h"
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090047
/* Driver-wide lock serializing most binder state (pre fine-grained locking). */
static DEFINE_MUTEX(binder_main_lock);

/* Procs with pending deferred work (put files / flush / release). */
static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

/* All registered binder devices and all open procs. */
static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

/* Nodes whose owning proc died but that are still referenced remotely. */
static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;	/* source of unique debug ids */
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090063
/*
 * BINDER_DEBUG_ENTRY(name) - boilerplate for a seq_file-based debugfs
 * entry: generates binder_<name>_open(), which hooks binder_<name>_show()
 * into single_open(), plus the matching binder_<name>_fops table.
 */
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090080
/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

/* Userspace must never map the binder buffer writable. */
#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)

/* Bits for binder_debug_mask; each bit enables one class of pr_info(). */
enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
	BINDER_DEBUG_READ_WRITE = 1U << 6,
	BINDER_DEBUG_USER_REFS = 1U << 7,
	BINDER_DEBUG_THREADS = 1U << 8,
	BINDER_DEBUG_TRANSACTION = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

/* Comma-separated list of device names to create (from Kconfig). */
static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);
116
/* Woken when binder_stop_on_user_error drops below 2 (debug aid). */
static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

/*
 * Module-parameter setter for "stop_on_user_error". After storing the
 * new value, wake any threads parked on binder_user_error_wait if the
 * value no longer requests stopping (< 2).
 */
static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
	param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
132
/* Emit a pr_info() when the given debug class is enabled in the mask. */
#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

/*
 * Report a userspace protocol error. When stop_on_user_error is set,
 * escalate it to 2 so readers block until a developer has had a look.
 */
#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

/* Convert an embedded binder object header back to its container type. */
#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)
157
/* Object classes whose create/delete counts are tracked in binder_stats. */
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];	/* per BR_* return counts */
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];		/* per BC_* command counts */
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

/* Driver-wide statistics; procs and threads keep their own copies too. */
static struct binder_stats binder_stats;

/* Record destruction of an object of class @type. */
static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

/* Record creation of an object of class @type. */
static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}
187
/* One snapshot of a (possibly failed) transaction for debugfs display. */
struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;	/* written last; 0 while entry is being filled */
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	const char *context_name;
};

/* Ring buffer holding the most recent 32 transaction log entries. */
struct binder_transaction_log {
	atomic_t cur;	/* monotonically increasing slot counter */
	bool full;	/* true once the ring has wrapped at least once */
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;
212
/*
 * Reserve and zero the next slot in @log's ring buffer.
 *
 * Lock-free: the slot index comes from an atomic counter, so concurrent
 * callers obtain distinct slots. The caller fills in the returned entry
 * and finally sets e->debug_id_done to publish it to readers.
 */
static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = 1;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by the memset() below.
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
232
/* Per-device global state: the registered context manager node. */
struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;

	kuid_t binder_context_mgr_uid;
	const char *name;	/* device name, e.g. "binder" */
};

/* One registered /dev/<name> binder misc device. */
struct binder_device {
	struct hlist_node hlist;	/* entry in binder_devices */
	struct miscdevice miscdev;
	struct binder_context context;
};
246
/* A unit of work queued on a proc's or thread's todo list. */
struct binder_work {
	struct list_head entry;
	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

/* Queued error to be delivered to userspace as return command @cmd. */
struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};
264
/* A binder object: the server-side endpoint that handles refer to. */
struct binder_node {
	int debug_id;
	struct binder_work work;
	union {
		struct rb_node rb_node;		/* in proc->nodes while proc is alive */
		struct hlist_node dead_node;	/* in binder_dead_nodes after proc death */
	};
	struct binder_proc *proc;	/* owning process; NULL once dead */
	struct hlist_head refs;		/* all binder_refs pointing at this node */
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	binder_uintptr_t ptr;		/* userspace object address (tree key) */
	binder_uintptr_t cookie;
	unsigned has_strong_ref:1;
	unsigned pending_strong_ref:1;
	unsigned has_weak_ref:1;
	unsigned pending_weak_ref:1;
	unsigned has_async_transaction:1;
	unsigned accept_fds:1;
	unsigned min_priority:8;
	struct list_head async_todo;	/* async transactions waiting their turn */
};

/* Death notification registered by a ref holder against a remote node. */
struct binder_ref_death {
	struct binder_work work;
	binder_uintptr_t cookie;
};
293
/* One process's handle (descriptor) to a binder_node. */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	int debug_id;
	struct rb_node rb_node_desc;	/* in proc->refs_by_desc */
	struct rb_node rb_node_node;	/* in proc->refs_by_node */
	struct hlist_node node_entry;	/* in node->refs */
	struct binder_proc *proc;	/* owner of this ref */
	struct binder_node *node;	/* the referenced node */
	uint32_t desc;			/* userspace handle value */
	int strong;
	int weak;
	struct binder_ref_death *death;	/* pending death notification, if any */
};
310
/* Reasons a proc may be queued on binder_deferred_list. */
enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES = 0x01,
	BINDER_DEFERRED_FLUSH = 0x02,
	BINDER_DEFERRED_RELEASE = 0x04,
};

/* Per-process binder state, created when a binder device is opened. */
struct binder_proc {
	struct hlist_node proc_node;	/* entry in binder_procs */
	struct rb_root threads;		/* binder_threads, keyed by pid */
	struct rb_root nodes;		/* nodes owned by this proc, keyed by ptr */
	struct rb_root refs_by_desc;	/* refs held, keyed by descriptor */
	struct rb_root refs_by_node;	/* refs held, keyed by node address */
	int pid;
	struct task_struct *tsk;
	struct files_struct *files;
	struct hlist_node deferred_work_node;
	int deferred_work;		/* mask of enum binder_deferred_state */
	bool is_dead;			/* released; freed once tmp_ref drops */

	struct list_head todo;		/* work any thread of this proc may take */
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int ready_threads;
	int tmp_ref;			/* in-use count, see binder_proc_dec_tmpref() */
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;	/* buffer allocator for this proc */
	struct binder_context *context;	/* device context this proc opened */
};
344
/* Looper state bits tracked for each binder thread. */
enum {
	BINDER_LOOPER_STATE_REGISTERED = 0x01,
	BINDER_LOOPER_STATE_ENTERED = 0x02,
	BINDER_LOOPER_STATE_EXITED = 0x04,
	BINDER_LOOPER_STATE_INVALID = 0x08,
	BINDER_LOOPER_STATE_WAITING = 0x10,
};

/* Per-thread binder state, created on first use by that thread. */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;		/* in proc->threads */
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	struct binder_error return_error;	/* error for current transaction */
	struct binder_error reply_error;	/* error for in-flight reply */
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;	/* in-use count, see binder_thread_dec_tmpref() */
	bool is_dead;		/* released; freed once tmp_ref drops */
};
368
/* An in-flight transaction (or reply) between two processes. */
struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;	/* sender; may be NULLed on teardown */
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;	/* payload in target's address space */
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;	/* target's priority before inheritance */
	kuid_t sender_euid;
	/**
	 * @lock: protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};
394
395static void
396binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
Todd Kjos7a4408c2017-06-29 12:01:57 -0700397static void binder_free_thread(struct binder_thread *thread);
398static void binder_free_proc(struct binder_proc *proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900399
/*
 * Allocate an unused fd in the target proc's file table (not the
 * caller's), honouring the target task's RLIMIT_NOFILE.
 * Returns the new fd or a negative errno (-ESRCH if the table is
 * gone, -EMFILE if the task's sighand can't be locked).
 */
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;

	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	return __alloc_fd(files, 0, rlim_cur, flags);
}
417
/*
 * copied from fd_install
 *
 * Install @file at @fd in the target proc's file table; silently a
 * no-op if the table is already gone (process exiting).
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	if (proc->files)
		__fd_install(proc->files, fd, file);
}
427
428/*
429 * copied from sys_close
430 */
431static long task_close_fd(struct binder_proc *proc, unsigned int fd)
432{
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900433 int retval;
434
Al Viro483ce1d2012-08-19 12:04:24 -0400435 if (proc->files == NULL)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900436 return -ESRCH;
437
Al Viro483ce1d2012-08-19 12:04:24 -0400438 retval = __close_fd(proc->files, fd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900439 /* can't restart close syscall because file table entry was cleared */
440 if (unlikely(retval == -ERESTARTSYS ||
441 retval == -ERESTARTNOINTR ||
442 retval == -ERESTARTNOHAND ||
443 retval == -ERESTART_RESTARTBLOCK))
444 retval = -EINTR;
445
446 return retval;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900447}
448
/* Acquire the driver-wide lock, emitting tracepoints around contention. */
static inline void binder_lock(const char *tag)
{
	trace_binder_lock(tag);
	mutex_lock(&binder_main_lock);
	trace_binder_locked(tag);
}

/* Release the driver-wide lock. */
static inline void binder_unlock(const char *tag)
{
	trace_binder_unlock(tag);
	mutex_unlock(&binder_main_lock);
}
461
/*
 * Set the current task's nice value. When the requested value is not
 * permitted, clamp to the best value RLIMIT_NICE allows and log.
 */
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		      current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	/* rlimit allows no usable nice value at all: that's a user error */
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
479
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900480static struct binder_node *binder_get_node(struct binder_proc *proc,
Arve Hjønnevågda498892014-02-21 14:40:26 -0800481 binder_uintptr_t ptr)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900482{
483 struct rb_node *n = proc->nodes.rb_node;
484 struct binder_node *node;
485
486 while (n) {
487 node = rb_entry(n, struct binder_node, rb_node);
488
489 if (ptr < node->ptr)
490 n = n->rb_left;
491 else if (ptr > node->ptr)
492 n = n->rb_right;
493 else
494 return node;
495 }
496 return NULL;
497}
498
/*
 * Allocate and insert a new node for (@ptr, @cookie) into @proc's node
 * tree. Returns NULL if allocation fails or a node with the same @ptr
 * already exists.
 */
static struct binder_node *binder_new_node(struct binder_proc *proc,
					   binder_uintptr_t ptr,
					   binder_uintptr_t cookie)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;

	/* Find the insertion point; bail out on a duplicate ptr. */
	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else
			return NULL;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_NODE);
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);
	return node;
}
538
/*
 * Take a strong or weak reference on @node. @internal selects the
 * internal (held-by-ref) counters vs the local ones. When the node must
 * report a newly-held ref type to its owner, its work item is queued on
 * @target_list. Returns 0, or -EINVAL for an invalid increment.
 */
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	if (strong) {
		if (internal) {
			/*
			 * The first internal strong ref must be able to
			 * queue node work — except on the context manager
			 * node, which already holds a strong ref.
			 */
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			list_del_init(&node->work.entry);
			list_add_tail(&node->work.entry, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			list_add_tail(&node->work.entry, target_list);
		}
	}
	return 0;
}
574
/*
 * Drop a strong or weak reference on @node. When no references of any
 * kind remain the node is freed; if the owner still believes refs
 * exist, the node's work item is queued on the owner's todo list so it
 * can be told first.
 */
static int binder_dec_node(struct binder_node *node, int strong, int internal)
{
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return 0;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || !hlist_empty(&node->refs))
			return 0;
	}
	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
		/* Owner must be notified of the dropped ref type. */
		if (list_empty(&node->work.entry)) {
			list_add_tail(&node->work.entry, &node->proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs) {
			list_del_init(&node->work.entry);
			if (node->proc) {
				rb_erase(&node->rb_node, &node->proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				/* Node outlived its proc: unlink from the
				 * global dead-nodes list. */
				spin_lock(&binder_dead_nodes_lock);
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			kfree(node);
			binder_stats_deleted(BINDER_STAT_NODE);
		}
	}

	return 0;
}
619
620
621static struct binder_ref *binder_get_ref(struct binder_proc *proc,
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +0200622 u32 desc, bool need_strong_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900623{
624 struct rb_node *n = proc->refs_by_desc.rb_node;
625 struct binder_ref *ref;
626
627 while (n) {
628 ref = rb_entry(n, struct binder_ref, rb_node_desc);
629
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +0200630 if (desc < ref->desc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900631 n = n->rb_left;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +0200632 } else if (desc > ref->desc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900633 n = n->rb_right;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +0200634 } else if (need_strong_ref && !ref->strong) {
635 binder_user_error("tried to use weak ref as strong ref\n");
636 return NULL;
637 } else {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900638 return ref;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +0200639 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900640 }
641 return NULL;
642}
643
/*
 * Return @proc's ref to @node, creating one (with the lowest unused
 * descriptor) if none exists yet. Descriptor 0 is reserved for the
 * context manager node. Returns NULL on allocation failure.
 */
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
						  struct binder_node *node)
{
	struct rb_node *n;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref, *new_ref;
	struct binder_context *context = proc->context;

	/* Fast path: this proc already holds a ref to @node. */
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (new_ref == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_REF);
	new_ref->debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	/* Pick the smallest unused descriptor (0 only for the ctx mgr). */
	new_ref->desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->desc > new_ref->desc)
			break;
		new_ref->desc = ref->desc + 1;
	}

	/* Insert into the by-descriptor tree; desc must be unique. */
	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->desc < ref->desc)
			p = &(*p)->rb_left;
		else if (new_ref->desc > ref->desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		      proc->pid, new_ref->debug_id, new_ref->desc,
		      node->debug_id);
	return new_ref;
}
704
/*
 * Tear down @ref: remove it from both proc trees and the node's ref
 * list, drop the node references it held, cancel any pending death
 * notification, and free it.
 */
static void binder_delete_ref(struct binder_ref *ref)
{
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		      ref->proc->pid, ref->debug_id, ref->desc,
		      ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
	if (ref->strong)
		binder_dec_node(ref->node, 1, 1);
	hlist_del(&ref->node_entry);
	binder_dec_node(ref->node, 0, 1);	/* every ref holds a weak node ref */
	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			      ref->proc->pid, ref->debug_id, ref->desc);
		list_del(&ref->death->work.entry);
		kfree(ref->death);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	kfree(ref);
	binder_stats_deleted(BINDER_STAT_REF);
}
729
/*
 * Increment @ref's strong or weak count. The first reference of a kind
 * also takes the matching reference on the underlying node (queuing
 * node work on @target_list). Returns 0 or a negative errno.
 */
static int binder_inc_ref(struct binder_ref *ref, int strong,
			  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->strong++;
	} else {
		if (ref->weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->weak++;
	}
	return 0;
}
752
753
/*
 * Decrement @ref's strong or weak count, dropping the matching node
 * reference when the strong count reaches zero and deleting the ref
 * entirely once both counts hit zero. Underflow from userspace is
 * rejected with -EINVAL.
 */
static int binder_dec_ref(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->strong--;
		if (ref->strong == 0) {
			int ret;

			ret = binder_dec_node(ref->node, strong, 1);
			if (ret)
				return ret;
		}
	} else {
		if (ref->weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->weak--;
	}
	if (ref->strong == 0 && ref->weak == 0)
		binder_delete_ref(ref);
	return 0;
}
784
/*
 * Pop @t off @target_thread's transaction stack and sever the
 * transaction's back-pointer to that thread. @t must be the top of
 * the stack (BUG otherwise).
 */
static void binder_pop_transaction(struct binder_thread *target_thread,
				   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}
795
/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 *
	 * TODO: future patch adds locking to ensure that the
	 * check of tmp_ref and is_dead is done with a lock held
	 */
	atomic_dec(&thread->tmp_ref);
	/* Last user of a dead thread frees it. */
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_free_thread(thread);
		return;
	}
}
823
824/**
825 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
826 * @proc: proc to decrement
827 *
828 * A binder_proc needs to be kept alive while being used to create or
829 * handle a transaction. proc->tmp_ref is incremented when
830 * creating a new transaction or the binder_proc is currently in-use
831 * by threads that are being released. When done with the binder_proc,
832 * this function is called to decrement the counter and free the
833 * proc if appropriate (proc has been released, all threads have
834 * been released and not currenly in-use to process a transaction).
835 */
836static void binder_proc_dec_tmpref(struct binder_proc *proc)
837{
838 proc->tmp_ref--;
839 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
840 !proc->tmp_ref) {
841 binder_free_proc(proc);
842 return;
843 }
844}
845
846/**
847 * binder_get_txn_from() - safely extract the "from" thread in transaction
848 * @t: binder transaction for t->from
849 *
850 * Atomically return the "from" thread and increment the tmp_ref
851 * count for the thread to ensure it stays alive until
852 * binder_thread_dec_tmpref() is called.
853 *
854 * Return: the value of t->from
855 */
856static struct binder_thread *binder_get_txn_from(
857 struct binder_transaction *t)
858{
859 struct binder_thread *from;
860
861 spin_lock(&t->lock);
862 from = t->from;
863 if (from)
864 atomic_inc(&from->tmp_ref);
865 spin_unlock(&t->lock);
866 return from;
867}
868
Todd Kjosb6d282c2017-06-29 12:01:54 -0700869static void binder_free_transaction(struct binder_transaction *t)
870{
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900871 if (t->buffer)
872 t->buffer->transaction = NULL;
873 kfree(t);
874 binder_stats_deleted(BINDER_STAT_TRANSACTION);
875}
876
/*
 * Deliver @error_code as a failed reply to the sender of transaction @t.
 *
 * Walks the transaction stack towards the root via from_parent: if the
 * sending thread is still alive, queue a reply_error work item on its
 * todo list and wake it; if the sender died, free the transaction and
 * retry with its parent. @t (and any dead ancestors visited) is always
 * freed before returning.
 */
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	/* one-way transactions have no sender waiting for a reply */
	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		/* takes a tmp_ref on t->from so it cannot be freed under us */
		target_thread = binder_get_txn_from(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				      t->debug_id,
				      target_thread->proc->pid,
				      target_thread->pid);

			binder_pop_transaction(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				list_add_tail(
					&target_thread->reply_error.work.entry,
					&target_thread->todo);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * A reply error is already pending for this
				 * thread; this should not happen.
				 */
				WARN(1, "Unexpected reply error: %u\n",
						target_thread->reply_error.cmd);
			}
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		/* sender is dead: remember the parent before freeing t */
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}
926
Martijn Coenenfeba3902017-02-03 14:40:45 -0800927/**
928 * binder_validate_object() - checks for a valid metadata object in a buffer.
929 * @buffer: binder_buffer that we're parsing.
930 * @offset: offset in the buffer at which to validate an object.
931 *
932 * Return: If there's a valid metadata object at @offset in @buffer, the
933 * size of that object. Otherwise, it returns zero.
934 */
935static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
936{
937 /* Check if we can read a header first */
938 struct binder_object_header *hdr;
939 size_t object_size = 0;
940
941 if (offset > buffer->data_size - sizeof(*hdr) ||
942 buffer->data_size < sizeof(*hdr) ||
943 !IS_ALIGNED(offset, sizeof(u32)))
944 return 0;
945
946 /* Ok, now see if we can read a complete object. */
947 hdr = (struct binder_object_header *)(buffer->data + offset);
948 switch (hdr->type) {
949 case BINDER_TYPE_BINDER:
950 case BINDER_TYPE_WEAK_BINDER:
951 case BINDER_TYPE_HANDLE:
952 case BINDER_TYPE_WEAK_HANDLE:
953 object_size = sizeof(struct flat_binder_object);
954 break;
955 case BINDER_TYPE_FD:
956 object_size = sizeof(struct binder_fd_object);
957 break;
Martijn Coenen79802402017-02-03 14:40:51 -0800958 case BINDER_TYPE_PTR:
959 object_size = sizeof(struct binder_buffer_object);
960 break;
Martijn Coenendef95c72017-02-03 14:40:52 -0800961 case BINDER_TYPE_FDA:
962 object_size = sizeof(struct binder_fd_array_object);
963 break;
Martijn Coenenfeba3902017-02-03 14:40:45 -0800964 default:
965 return 0;
966 }
967 if (offset <= buffer->data_size - object_size &&
968 buffer->data_size >= object_size)
969 return object_size;
970 else
971 return 0;
972}
973
Martijn Coenen79802402017-02-03 14:40:51 -0800974/**
975 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
976 * @b: binder_buffer containing the object
977 * @index: index in offset array at which the binder_buffer_object is
978 * located
979 * @start: points to the start of the offset array
980 * @num_valid: the number of valid offsets in the offset array
981 *
982 * Return: If @index is within the valid range of the offset array
983 * described by @start and @num_valid, and if there's a valid
984 * binder_buffer_object at the offset found in index @index
985 * of the offset array, that object is returned. Otherwise,
986 * %NULL is returned.
987 * Note that the offset found in index @index itself is not
988 * verified; this function assumes that @num_valid elements
989 * from @start were previously verified to have valid offsets.
990 */
991static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
992 binder_size_t index,
993 binder_size_t *start,
994 binder_size_t num_valid)
995{
996 struct binder_buffer_object *buffer_obj;
997 binder_size_t *offp;
998
999 if (index >= num_valid)
1000 return NULL;
1001
1002 offp = start + index;
1003 buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
1004 if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
1005 return NULL;
1006
1007 return buffer_obj;
1008}
1009
1010/**
1011 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1012 * @b: transaction buffer
1013 * @objects_start start of objects buffer
1014 * @buffer: binder_buffer_object in which to fix up
1015 * @offset: start offset in @buffer to fix up
1016 * @last_obj: last binder_buffer_object that we fixed up in
1017 * @last_min_offset: minimum fixup offset in @last_obj
1018 *
1019 * Return: %true if a fixup in buffer @buffer at offset @offset is
1020 * allowed.
1021 *
1022 * For safety reasons, we only allow fixups inside a buffer to happen
1023 * at increasing offsets; additionally, we only allow fixup on the last
1024 * buffer object that was verified, or one of its parents.
1025 *
1026 * Example of what is allowed:
1027 *
1028 * A
1029 * B (parent = A, offset = 0)
1030 * C (parent = A, offset = 16)
1031 * D (parent = C, offset = 0)
1032 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1033 *
1034 * Examples of what is not allowed:
1035 *
1036 * Decreasing offsets within the same parent:
1037 * A
1038 * C (parent = A, offset = 16)
1039 * B (parent = A, offset = 0) // decreasing offset within A
1040 *
1041 * Referring to a parent that wasn't the last object or any of its parents:
1042 * A
1043 * B (parent = A, offset = 0)
1044 * C (parent = A, offset = 0)
1045 * C (parent = A, offset = 16)
1046 * D (parent = B, offset = 0) // B is not A or any of A's parents
1047 */
1048static bool binder_validate_fixup(struct binder_buffer *b,
1049 binder_size_t *objects_start,
1050 struct binder_buffer_object *buffer,
1051 binder_size_t fixup_offset,
1052 struct binder_buffer_object *last_obj,
1053 binder_size_t last_min_offset)
1054{
1055 if (!last_obj) {
1056 /* Nothing to fix up in */
1057 return false;
1058 }
1059
1060 while (last_obj != buffer) {
1061 /*
1062 * Safe to retrieve the parent of last_obj, since it
1063 * was already previously verified by the driver.
1064 */
1065 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1066 return false;
1067 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
1068 last_obj = (struct binder_buffer_object *)
1069 (b->data + *(objects_start + last_obj->parent));
1070 }
1071 return (fixup_offset >= last_min_offset);
1072}
1073
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001074static void binder_transaction_buffer_release(struct binder_proc *proc,
1075 struct binder_buffer *buffer,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001076 binder_size_t *failed_at)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001077{
Martijn Coenen79802402017-02-03 14:40:51 -08001078 binder_size_t *offp, *off_start, *off_end;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001079 int debug_id = buffer->debug_id;
1080
1081 binder_debug(BINDER_DEBUG_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301082 "%d buffer release %d, size %zd-%zd, failed at %p\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001083 proc->pid, buffer->debug_id,
1084 buffer->data_size, buffer->offsets_size, failed_at);
1085
1086 if (buffer->target_node)
1087 binder_dec_node(buffer->target_node, 1, 0);
1088
Martijn Coenen79802402017-02-03 14:40:51 -08001089 off_start = (binder_size_t *)(buffer->data +
1090 ALIGN(buffer->data_size, sizeof(void *)));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001091 if (failed_at)
1092 off_end = failed_at;
1093 else
Martijn Coenen79802402017-02-03 14:40:51 -08001094 off_end = (void *)off_start + buffer->offsets_size;
1095 for (offp = off_start; offp < off_end; offp++) {
Martijn Coenenfeba3902017-02-03 14:40:45 -08001096 struct binder_object_header *hdr;
1097 size_t object_size = binder_validate_object(buffer, *offp);
Seunghun Lee10f62862014-05-01 01:30:23 +09001098
Martijn Coenenfeba3902017-02-03 14:40:45 -08001099 if (object_size == 0) {
1100 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
Arve Hjønnevågda498892014-02-21 14:40:26 -08001101 debug_id, (u64)*offp, buffer->data_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001102 continue;
1103 }
Martijn Coenenfeba3902017-02-03 14:40:45 -08001104 hdr = (struct binder_object_header *)(buffer->data + *offp);
1105 switch (hdr->type) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001106 case BINDER_TYPE_BINDER:
1107 case BINDER_TYPE_WEAK_BINDER: {
Martijn Coenenfeba3902017-02-03 14:40:45 -08001108 struct flat_binder_object *fp;
1109 struct binder_node *node;
Seunghun Lee10f62862014-05-01 01:30:23 +09001110
Martijn Coenenfeba3902017-02-03 14:40:45 -08001111 fp = to_flat_binder_object(hdr);
1112 node = binder_get_node(proc, fp->binder);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001113 if (node == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08001114 pr_err("transaction release %d bad node %016llx\n",
1115 debug_id, (u64)fp->binder);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001116 break;
1117 }
1118 binder_debug(BINDER_DEBUG_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001119 " node %d u%016llx\n",
1120 node->debug_id, (u64)node->ptr);
Martijn Coenenfeba3902017-02-03 14:40:45 -08001121 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
1122 0);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001123 } break;
1124 case BINDER_TYPE_HANDLE:
1125 case BINDER_TYPE_WEAK_HANDLE: {
Martijn Coenenfeba3902017-02-03 14:40:45 -08001126 struct flat_binder_object *fp;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001127 struct binder_ref *ref;
1128
Martijn Coenenfeba3902017-02-03 14:40:45 -08001129 fp = to_flat_binder_object(hdr);
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001130 ref = binder_get_ref(proc, fp->handle,
Martijn Coenenfeba3902017-02-03 14:40:45 -08001131 hdr->type == BINDER_TYPE_HANDLE);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001132 if (ref == NULL) {
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01001133 pr_err("transaction release %d bad handle %d\n",
Anmol Sarma56b468f2012-10-30 22:35:43 +05301134 debug_id, fp->handle);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001135 break;
1136 }
1137 binder_debug(BINDER_DEBUG_TRANSACTION,
1138 " ref %d desc %d (node %d)\n",
1139 ref->debug_id, ref->desc, ref->node->debug_id);
Martijn Coenenfeba3902017-02-03 14:40:45 -08001140 binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001141 } break;
1142
Martijn Coenenfeba3902017-02-03 14:40:45 -08001143 case BINDER_TYPE_FD: {
1144 struct binder_fd_object *fp = to_binder_fd_object(hdr);
1145
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001146 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenenfeba3902017-02-03 14:40:45 -08001147 " fd %d\n", fp->fd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001148 if (failed_at)
Martijn Coenenfeba3902017-02-03 14:40:45 -08001149 task_close_fd(proc, fp->fd);
1150 } break;
Martijn Coenen79802402017-02-03 14:40:51 -08001151 case BINDER_TYPE_PTR:
1152 /*
1153 * Nothing to do here, this will get cleaned up when the
1154 * transaction buffer gets freed
1155 */
1156 break;
Martijn Coenendef95c72017-02-03 14:40:52 -08001157 case BINDER_TYPE_FDA: {
1158 struct binder_fd_array_object *fda;
1159 struct binder_buffer_object *parent;
1160 uintptr_t parent_buffer;
1161 u32 *fd_array;
1162 size_t fd_index;
1163 binder_size_t fd_buf_size;
1164
1165 fda = to_binder_fd_array_object(hdr);
1166 parent = binder_validate_ptr(buffer, fda->parent,
1167 off_start,
1168 offp - off_start);
1169 if (!parent) {
1170 pr_err("transaction release %d bad parent offset",
1171 debug_id);
1172 continue;
1173 }
1174 /*
1175 * Since the parent was already fixed up, convert it
1176 * back to kernel address space to access it
1177 */
1178 parent_buffer = parent->buffer -
Todd Kjos19c98722017-06-29 12:01:40 -07001179 binder_alloc_get_user_buffer_offset(
1180 &proc->alloc);
Martijn Coenendef95c72017-02-03 14:40:52 -08001181
1182 fd_buf_size = sizeof(u32) * fda->num_fds;
1183 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
1184 pr_err("transaction release %d invalid number of fds (%lld)\n",
1185 debug_id, (u64)fda->num_fds);
1186 continue;
1187 }
1188 if (fd_buf_size > parent->length ||
1189 fda->parent_offset > parent->length - fd_buf_size) {
1190 /* No space for all file descriptors here. */
1191 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
1192 debug_id, (u64)fda->num_fds);
1193 continue;
1194 }
1195 fd_array = (u32 *)(parent_buffer + fda->parent_offset);
1196 for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
1197 task_close_fd(proc, fd_array[fd_index]);
1198 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001199 default:
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01001200 pr_err("transaction release %d bad object type %x\n",
Martijn Coenenfeba3902017-02-03 14:40:45 -08001201 debug_id, hdr->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001202 break;
1203 }
1204 }
1205}
1206
/*
 * Translate a BINDER/WEAK_BINDER object in a transaction into a
 * HANDLE/WEAK_HANDLE for the target process: look up (or create) the
 * binder_node for @fp->binder in the sending process, find or create a
 * ref to that node in the target process, and rewrite @fp in place.
 *
 * Return: 0 on success, -ENOMEM, -EINVAL or -EPERM on failure.
 */
static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_ref *ref;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		/* first time this process sends this binder: create a node */
		node = binder_new_node(proc, fp->binder, fp->cookie);
		if (!node)
			return -ENOMEM;

		node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
		node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		return -EINVAL;
	}
	/* LSM hook may veto passing this binder to the target */
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
		return -EPERM;

	ref = binder_get_ref_for_node(target_proc, node);
	if (!ref)
		return -ENOMEM;

	/* rewrite the object in place: node -> handle for the target */
	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = ref->desc;
	fp->cookie = 0;
	binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo);

	trace_binder_transaction_node_to_ref(t, node, ref);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     " node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     ref->debug_id, ref->desc);

	return 0;
}
1256
/*
 * Translate a HANDLE/WEAK_HANDLE object in a transaction for the target
 * process. If the target owns the underlying node, the handle collapses
 * back into a (weak) binder object; otherwise a ref to the node is found
 * or created in the target and the handle is rewritten to its desc.
 *
 * Return: 0 on success, -EINVAL, -EPERM or -ENOMEM on failure.
 */
static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_ref *ref;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	ref = binder_get_ref(proc, fp->handle,
			     fp->hdr.type == BINDER_TYPE_HANDLE);
	if (!ref) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	/* LSM hook may veto passing this binder to the target */
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
		return -EPERM;

	if (ref->node->proc == target_proc) {
		/* handle refers back to the target: convert to binder */
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = ref->node->ptr;
		fp->cookie = ref->node->cookie;
		binder_inc_node(ref->node, fp->hdr.type == BINDER_TYPE_BINDER,
				0, NULL);
		trace_binder_transaction_ref_to_node(t, ref);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     " ref %d desc %d -> node %d u%016llx\n",
			     ref->debug_id, ref->desc, ref->node->debug_id,
			     (u64)ref->node->ptr);
	} else {
		struct binder_ref *new_ref;

		/* different process: hand out a ref in the target */
		new_ref = binder_get_ref_for_node(target_proc, ref->node);
		if (!new_ref)
			return -ENOMEM;

		fp->binder = 0;
		fp->handle = new_ref->desc;
		fp->cookie = 0;
		binder_inc_ref(new_ref, fp->hdr.type == BINDER_TYPE_HANDLE,
			       NULL);
		trace_binder_transaction_ref_to_ref(t, ref, new_ref);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     " ref %d desc %d -> ref %d desc %d (node %d)\n",
			     ref->debug_id, ref->desc, new_ref->debug_id,
			     new_ref->desc, ref->node->debug_id);
	}
	return 0;
}
1309
1310static int binder_translate_fd(int fd,
1311 struct binder_transaction *t,
1312 struct binder_thread *thread,
1313 struct binder_transaction *in_reply_to)
1314{
1315 struct binder_proc *proc = thread->proc;
1316 struct binder_proc *target_proc = t->to_proc;
1317 int target_fd;
1318 struct file *file;
1319 int ret;
1320 bool target_allows_fd;
1321
1322 if (in_reply_to)
1323 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
1324 else
1325 target_allows_fd = t->buffer->target_node->accept_fds;
1326 if (!target_allows_fd) {
1327 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
1328 proc->pid, thread->pid,
1329 in_reply_to ? "reply" : "transaction",
1330 fd);
1331 ret = -EPERM;
1332 goto err_fd_not_accepted;
1333 }
1334
1335 file = fget(fd);
1336 if (!file) {
1337 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
1338 proc->pid, thread->pid, fd);
1339 ret = -EBADF;
1340 goto err_fget;
1341 }
1342 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
1343 if (ret < 0) {
1344 ret = -EPERM;
1345 goto err_security;
1346 }
1347
1348 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
1349 if (target_fd < 0) {
1350 ret = -ENOMEM;
1351 goto err_get_unused_fd;
1352 }
1353 task_fd_install(target_proc, target_fd, file);
1354 trace_binder_transaction_fd(t, fd, target_fd);
1355 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
1356 fd, target_fd);
1357
1358 return target_fd;
1359
1360err_get_unused_fd:
1361err_security:
1362 fput(file);
1363err_fget:
1364err_fd_not_accepted:
1365 return ret;
1366}
1367
/*
 * Translate every fd in a BINDER_TYPE_FDA object for the target process.
 *
 * The fd array lives inside @parent's buffer, which was already fixed
 * up, so its pointer is converted back to a kernel address before use.
 * Each fd is translated with binder_translate_fd(); if any translation
 * fails, the fds already installed in the target are closed again.
 *
 * Return: 0 on success, -EINVAL for bad sizes/alignment, or the
 * negative error from binder_translate_fd() on failure.
 */
static int binder_translate_fd_array(struct binder_fd_array_object *fda,
				     struct binder_buffer_object *parent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size, num_installed_fds;
	int target_fd;
	uintptr_t parent_buffer;
	u32 *fd_array;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	/* reject num_fds whose byte size would overflow size_t */
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * Since the parent was already fixed up, convert it
	 * back to the kernel address space to access it
	 */
	parent_buffer = parent->buffer -
		binder_alloc_get_user_buffer_offset(&target_proc->alloc);
	fd_array = (u32 *)(parent_buffer + fda->parent_offset);
	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		target_fd = binder_translate_fd(fd_array[fdi], t, thread,
						in_reply_to);
		if (target_fd < 0)
			goto err_translate_fd_failed;
		/* store the target-side fd back into the array */
		fd_array[fdi] = target_fd;
	}
	return 0;

err_translate_fd_failed:
	/*
	 * Failed to allocate fd or security error, free fds
	 * installed so far.
	 */
	num_installed_fds = fdi;
	for (fdi = 0; fdi < num_installed_fds; fdi++)
		task_close_fd(target_proc, fd_array[fdi]);
	return target_fd;
}
1425
/*
 * Write the fixed-up pointer of @bp into its parent buffer object.
 *
 * If @bp has a parent, validate that the parent exists in the offset
 * array, that the fixup location obeys the in-order fixup rules (see
 * binder_validate_fixup()), and that the parent has room for a pointer
 * at @bp->parent_offset; then store bp->buffer there.
 *
 * Return: 0 on success (or if @bp has no parent), -EINVAL on failure.
 */
static int binder_fixup_parent(struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t *off_start,
			       binder_size_t num_valid,
			       struct binder_buffer_object *last_fixup_obj,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	u8 *parent_buffer;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(b, off_start,
				   parent, bp->parent_offset,
				   last_fixup_obj,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	/* parent->buffer is a user address; convert to a kernel address */
	parent_buffer = (u8 *)(parent->buffer -
			binder_alloc_get_user_buffer_offset(
				&target_proc->alloc));
	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;

	return 0;
}
1473
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001474static void binder_transaction(struct binder_proc *proc,
1475 struct binder_thread *thread,
Martijn Coenen4bfac802017-02-03 14:40:50 -08001476 struct binder_transaction_data *tr, int reply,
1477 binder_size_t extra_buffers_size)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001478{
Martijn Coenena056af42017-02-03 14:40:49 -08001479 int ret;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001480 struct binder_transaction *t;
1481 struct binder_work *tcomplete;
Martijn Coenen79802402017-02-03 14:40:51 -08001482 binder_size_t *offp, *off_end, *off_start;
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08001483 binder_size_t off_min;
Martijn Coenen79802402017-02-03 14:40:51 -08001484 u8 *sg_bufp, *sg_buf_end;
Todd Kjos7a4408c2017-06-29 12:01:57 -07001485 struct binder_proc *target_proc = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001486 struct binder_thread *target_thread = NULL;
1487 struct binder_node *target_node = NULL;
1488 struct list_head *target_list;
1489 wait_queue_head_t *target_wait;
1490 struct binder_transaction *in_reply_to = NULL;
1491 struct binder_transaction_log_entry *e;
Todd Kjos57ada2f2017-06-29 12:01:46 -07001492 uint32_t return_error = 0;
1493 uint32_t return_error_param = 0;
1494 uint32_t return_error_line = 0;
Martijn Coenen79802402017-02-03 14:40:51 -08001495 struct binder_buffer_object *last_fixup_obj = NULL;
1496 binder_size_t last_fixup_min_off = 0;
Martijn Coenen342e5c92017-02-03 14:40:46 -08001497 struct binder_context *context = proc->context;
Todd Kjosd99c7332017-06-29 12:01:53 -07001498 int t_debug_id = atomic_inc_return(&binder_last_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001499
1500 e = binder_transaction_log_add(&binder_transaction_log);
Todd Kjosd99c7332017-06-29 12:01:53 -07001501 e->debug_id = t_debug_id;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001502 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
1503 e->from_proc = proc->pid;
1504 e->from_thread = thread->pid;
1505 e->target_handle = tr->target.handle;
1506 e->data_size = tr->data_size;
1507 e->offsets_size = tr->offsets_size;
Martijn Coenen14db3182017-02-03 14:40:47 -08001508 e->context_name = proc->context->name;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001509
1510 if (reply) {
1511 in_reply_to = thread->transaction_stack;
1512 if (in_reply_to == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301513 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001514 proc->pid, thread->pid);
1515 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07001516 return_error_param = -EPROTO;
1517 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001518 goto err_empty_call_stack;
1519 }
1520 binder_set_nice(in_reply_to->saved_priority);
1521 if (in_reply_to->to_thread != thread) {
Todd Kjos7a4408c2017-06-29 12:01:57 -07001522 spin_lock(&in_reply_to->lock);
Anmol Sarma56b468f2012-10-30 22:35:43 +05301523 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001524 proc->pid, thread->pid, in_reply_to->debug_id,
1525 in_reply_to->to_proc ?
1526 in_reply_to->to_proc->pid : 0,
1527 in_reply_to->to_thread ?
1528 in_reply_to->to_thread->pid : 0);
Todd Kjos7a4408c2017-06-29 12:01:57 -07001529 spin_unlock(&in_reply_to->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001530 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07001531 return_error_param = -EPROTO;
1532 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001533 in_reply_to = NULL;
1534 goto err_bad_call_stack;
1535 }
1536 thread->transaction_stack = in_reply_to->to_parent;
Todd Kjos7a4408c2017-06-29 12:01:57 -07001537 target_thread = binder_get_txn_from(in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001538 if (target_thread == NULL) {
1539 return_error = BR_DEAD_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07001540 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001541 goto err_dead_binder;
1542 }
1543 if (target_thread->transaction_stack != in_reply_to) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301544 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001545 proc->pid, thread->pid,
1546 target_thread->transaction_stack ?
1547 target_thread->transaction_stack->debug_id : 0,
1548 in_reply_to->debug_id);
1549 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07001550 return_error_param = -EPROTO;
1551 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001552 in_reply_to = NULL;
1553 target_thread = NULL;
1554 goto err_dead_binder;
1555 }
1556 target_proc = target_thread->proc;
Todd Kjos7a4408c2017-06-29 12:01:57 -07001557 target_proc->tmp_ref++;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001558 } else {
1559 if (tr->target.handle) {
1560 struct binder_ref *ref;
Seunghun Lee10f62862014-05-01 01:30:23 +09001561
Todd Kjoseb349832017-06-29 12:01:56 -07001562 /*
1563 * There must already be a strong ref
1564 * on this node. If so, do a strong
1565 * increment on the node to ensure it
1566 * stays alive until the transaction is
1567 * done.
1568 */
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001569 ref = binder_get_ref(proc, tr->target.handle, true);
Todd Kjoseb349832017-06-29 12:01:56 -07001570 if (ref) {
1571 binder_inc_node(ref->node, 1, 0, NULL);
1572 target_node = ref->node;
1573 }
1574 if (target_node == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301575 binder_user_error("%d:%d got transaction to invalid handle\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001576 proc->pid, thread->pid);
1577 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07001578 return_error_param = -EINVAL;
1579 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001580 goto err_invalid_target_handle;
1581 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001582 } else {
Todd Kjosc44b1232017-06-29 12:01:43 -07001583 mutex_lock(&context->context_mgr_node_lock);
Martijn Coenen342e5c92017-02-03 14:40:46 -08001584 target_node = context->binder_context_mgr_node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001585 if (target_node == NULL) {
1586 return_error = BR_DEAD_REPLY;
Todd Kjosc44b1232017-06-29 12:01:43 -07001587 mutex_unlock(&context->context_mgr_node_lock);
Todd Kjos57ada2f2017-06-29 12:01:46 -07001588 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001589 goto err_no_context_mgr_node;
1590 }
Todd Kjoseb349832017-06-29 12:01:56 -07001591 binder_inc_node(target_node, 1, 0, NULL);
Todd Kjosc44b1232017-06-29 12:01:43 -07001592 mutex_unlock(&context->context_mgr_node_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001593 }
1594 e->to_node = target_node->debug_id;
1595 target_proc = target_node->proc;
1596 if (target_proc == NULL) {
1597 return_error = BR_DEAD_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07001598 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001599 goto err_dead_binder;
1600 }
Todd Kjos7a4408c2017-06-29 12:01:57 -07001601 target_proc->tmp_ref++;
Stephen Smalley79af7302015-01-21 10:54:10 -05001602 if (security_binder_transaction(proc->tsk,
1603 target_proc->tsk) < 0) {
1604 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07001605 return_error_param = -EPERM;
1606 return_error_line = __LINE__;
Stephen Smalley79af7302015-01-21 10:54:10 -05001607 goto err_invalid_target_handle;
1608 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001609 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
1610 struct binder_transaction *tmp;
Seunghun Lee10f62862014-05-01 01:30:23 +09001611
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001612 tmp = thread->transaction_stack;
1613 if (tmp->to_thread != thread) {
Todd Kjos7a4408c2017-06-29 12:01:57 -07001614 spin_lock(&tmp->lock);
Anmol Sarma56b468f2012-10-30 22:35:43 +05301615 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001616 proc->pid, thread->pid, tmp->debug_id,
1617 tmp->to_proc ? tmp->to_proc->pid : 0,
1618 tmp->to_thread ?
1619 tmp->to_thread->pid : 0);
Todd Kjos7a4408c2017-06-29 12:01:57 -07001620 spin_unlock(&tmp->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001621 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07001622 return_error_param = -EPROTO;
1623 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001624 goto err_bad_call_stack;
1625 }
1626 while (tmp) {
Todd Kjos7a4408c2017-06-29 12:01:57 -07001627 struct binder_thread *from;
1628
1629 spin_lock(&tmp->lock);
1630 from = tmp->from;
1631 if (from && from->proc == target_proc) {
1632 atomic_inc(&from->tmp_ref);
1633 target_thread = from;
1634 spin_unlock(&tmp->lock);
1635 break;
1636 }
1637 spin_unlock(&tmp->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001638 tmp = tmp->from_parent;
1639 }
1640 }
1641 }
1642 if (target_thread) {
1643 e->to_thread = target_thread->pid;
1644 target_list = &target_thread->todo;
1645 target_wait = &target_thread->wait;
1646 } else {
1647 target_list = &target_proc->todo;
1648 target_wait = &target_proc->wait;
1649 }
1650 e->to_proc = target_proc->pid;
1651
1652 /* TODO: reuse incoming transaction for reply */
1653 t = kzalloc(sizeof(*t), GFP_KERNEL);
1654 if (t == NULL) {
1655 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07001656 return_error_param = -ENOMEM;
1657 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001658 goto err_alloc_t_failed;
1659 }
1660 binder_stats_created(BINDER_STAT_TRANSACTION);
Todd Kjos7a4408c2017-06-29 12:01:57 -07001661 spin_lock_init(&t->lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001662
1663 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
1664 if (tcomplete == NULL) {
1665 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07001666 return_error_param = -ENOMEM;
1667 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001668 goto err_alloc_tcomplete_failed;
1669 }
1670 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
1671
Todd Kjosd99c7332017-06-29 12:01:53 -07001672 t->debug_id = t_debug_id;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001673
1674 if (reply)
1675 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen4bfac802017-02-03 14:40:50 -08001676 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001677 proc->pid, thread->pid, t->debug_id,
1678 target_proc->pid, target_thread->pid,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001679 (u64)tr->data.ptr.buffer,
1680 (u64)tr->data.ptr.offsets,
Martijn Coenen4bfac802017-02-03 14:40:50 -08001681 (u64)tr->data_size, (u64)tr->offsets_size,
1682 (u64)extra_buffers_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001683 else
1684 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenen4bfac802017-02-03 14:40:50 -08001685 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001686 proc->pid, thread->pid, t->debug_id,
1687 target_proc->pid, target_node->debug_id,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001688 (u64)tr->data.ptr.buffer,
1689 (u64)tr->data.ptr.offsets,
Martijn Coenen4bfac802017-02-03 14:40:50 -08001690 (u64)tr->data_size, (u64)tr->offsets_size,
1691 (u64)extra_buffers_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001692
1693 if (!reply && !(tr->flags & TF_ONE_WAY))
1694 t->from = thread;
1695 else
1696 t->from = NULL;
Tair Rzayev57bab7c2014-05-31 22:43:34 +03001697 t->sender_euid = task_euid(proc->tsk);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001698 t->to_proc = target_proc;
1699 t->to_thread = target_thread;
1700 t->code = tr->code;
1701 t->flags = tr->flags;
1702 t->priority = task_nice(current);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07001703
1704 trace_binder_transaction(reply, t, target_node);
1705
Todd Kjos19c98722017-06-29 12:01:40 -07001706 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
Martijn Coenen4bfac802017-02-03 14:40:50 -08001707 tr->offsets_size, extra_buffers_size,
1708 !reply && (t->flags & TF_ONE_WAY));
Todd Kjos57ada2f2017-06-29 12:01:46 -07001709 if (IS_ERR(t->buffer)) {
1710 /*
1711 * -ESRCH indicates VMA cleared. The target is dying.
1712 */
1713 return_error_param = PTR_ERR(t->buffer);
1714 return_error = return_error_param == -ESRCH ?
1715 BR_DEAD_REPLY : BR_FAILED_REPLY;
1716 return_error_line = __LINE__;
1717 t->buffer = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001718 goto err_binder_alloc_buf_failed;
1719 }
1720 t->buffer->allow_user_free = 0;
1721 t->buffer->debug_id = t->debug_id;
1722 t->buffer->transaction = t;
1723 t->buffer->target_node = target_node;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07001724 trace_binder_transaction_alloc_buf(t->buffer);
Martijn Coenen79802402017-02-03 14:40:51 -08001725 off_start = (binder_size_t *)(t->buffer->data +
1726 ALIGN(tr->data_size, sizeof(void *)));
1727 offp = off_start;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001728
Arve Hjønnevågda498892014-02-21 14:40:26 -08001729 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
1730 tr->data.ptr.buffer, tr->data_size)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301731 binder_user_error("%d:%d got transaction with invalid data ptr\n",
1732 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001733 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07001734 return_error_param = -EFAULT;
1735 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001736 goto err_copy_data_failed;
1737 }
Arve Hjønnevågda498892014-02-21 14:40:26 -08001738 if (copy_from_user(offp, (const void __user *)(uintptr_t)
1739 tr->data.ptr.offsets, tr->offsets_size)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301740 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
1741 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001742 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07001743 return_error_param = -EFAULT;
1744 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001745 goto err_copy_data_failed;
1746 }
Arve Hjønnevågda498892014-02-21 14:40:26 -08001747 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
1748 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
1749 proc->pid, thread->pid, (u64)tr->offsets_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001750 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07001751 return_error_param = -EINVAL;
1752 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001753 goto err_bad_offset;
1754 }
Martijn Coenen79802402017-02-03 14:40:51 -08001755 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
1756 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
1757 proc->pid, thread->pid,
1758 (u64)extra_buffers_size);
1759 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07001760 return_error_param = -EINVAL;
1761 return_error_line = __LINE__;
Martijn Coenen79802402017-02-03 14:40:51 -08001762 goto err_bad_offset;
1763 }
1764 off_end = (void *)off_start + tr->offsets_size;
1765 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
1766 sg_buf_end = sg_bufp + extra_buffers_size;
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08001767 off_min = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001768 for (; offp < off_end; offp++) {
Martijn Coenenfeba3902017-02-03 14:40:45 -08001769 struct binder_object_header *hdr;
1770 size_t object_size = binder_validate_object(t->buffer, *offp);
Seunghun Lee10f62862014-05-01 01:30:23 +09001771
Martijn Coenenfeba3902017-02-03 14:40:45 -08001772 if (object_size == 0 || *offp < off_min) {
1773 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08001774 proc->pid, thread->pid, (u64)*offp,
1775 (u64)off_min,
Martijn Coenenfeba3902017-02-03 14:40:45 -08001776 (u64)t->buffer->data_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001777 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07001778 return_error_param = -EINVAL;
1779 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001780 goto err_bad_offset;
1781 }
Martijn Coenenfeba3902017-02-03 14:40:45 -08001782
1783 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
1784 off_min = *offp + object_size;
1785 switch (hdr->type) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001786 case BINDER_TYPE_BINDER:
1787 case BINDER_TYPE_WEAK_BINDER: {
Martijn Coenenfeba3902017-02-03 14:40:45 -08001788 struct flat_binder_object *fp;
Seunghun Lee10f62862014-05-01 01:30:23 +09001789
Martijn Coenenfeba3902017-02-03 14:40:45 -08001790 fp = to_flat_binder_object(hdr);
Martijn Coenena056af42017-02-03 14:40:49 -08001791 ret = binder_translate_binder(fp, t, thread);
1792 if (ret < 0) {
Christian Engelmayer7d420432014-05-07 21:44:53 +02001793 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07001794 return_error_param = ret;
1795 return_error_line = __LINE__;
Martijn Coenena056af42017-02-03 14:40:49 -08001796 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001797 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001798 } break;
1799 case BINDER_TYPE_HANDLE:
1800 case BINDER_TYPE_WEAK_HANDLE: {
Martijn Coenenfeba3902017-02-03 14:40:45 -08001801 struct flat_binder_object *fp;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001802
Martijn Coenenfeba3902017-02-03 14:40:45 -08001803 fp = to_flat_binder_object(hdr);
Martijn Coenena056af42017-02-03 14:40:49 -08001804 ret = binder_translate_handle(fp, t, thread);
1805 if (ret < 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001806 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07001807 return_error_param = ret;
1808 return_error_line = __LINE__;
Martijn Coenena056af42017-02-03 14:40:49 -08001809 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001810 }
1811 } break;
1812
1813 case BINDER_TYPE_FD: {
Martijn Coenenfeba3902017-02-03 14:40:45 -08001814 struct binder_fd_object *fp = to_binder_fd_object(hdr);
Martijn Coenena056af42017-02-03 14:40:49 -08001815 int target_fd = binder_translate_fd(fp->fd, t, thread,
1816 in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001817
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001818 if (target_fd < 0) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001819 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07001820 return_error_param = target_fd;
1821 return_error_line = __LINE__;
Martijn Coenena056af42017-02-03 14:40:49 -08001822 goto err_translate_failed;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001823 }
Martijn Coenenfeba3902017-02-03 14:40:45 -08001824 fp->pad_binder = 0;
1825 fp->fd = target_fd;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001826 } break;
Martijn Coenendef95c72017-02-03 14:40:52 -08001827 case BINDER_TYPE_FDA: {
1828 struct binder_fd_array_object *fda =
1829 to_binder_fd_array_object(hdr);
1830 struct binder_buffer_object *parent =
1831 binder_validate_ptr(t->buffer, fda->parent,
1832 off_start,
1833 offp - off_start);
1834 if (!parent) {
1835 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
1836 proc->pid, thread->pid);
1837 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07001838 return_error_param = -EINVAL;
1839 return_error_line = __LINE__;
Martijn Coenendef95c72017-02-03 14:40:52 -08001840 goto err_bad_parent;
1841 }
1842 if (!binder_validate_fixup(t->buffer, off_start,
1843 parent, fda->parent_offset,
1844 last_fixup_obj,
1845 last_fixup_min_off)) {
1846 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
1847 proc->pid, thread->pid);
1848 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07001849 return_error_param = -EINVAL;
1850 return_error_line = __LINE__;
Martijn Coenendef95c72017-02-03 14:40:52 -08001851 goto err_bad_parent;
1852 }
1853 ret = binder_translate_fd_array(fda, parent, t, thread,
1854 in_reply_to);
1855 if (ret < 0) {
1856 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07001857 return_error_param = ret;
1858 return_error_line = __LINE__;
Martijn Coenendef95c72017-02-03 14:40:52 -08001859 goto err_translate_failed;
1860 }
1861 last_fixup_obj = parent;
1862 last_fixup_min_off =
1863 fda->parent_offset + sizeof(u32) * fda->num_fds;
1864 } break;
Martijn Coenen79802402017-02-03 14:40:51 -08001865 case BINDER_TYPE_PTR: {
1866 struct binder_buffer_object *bp =
1867 to_binder_buffer_object(hdr);
1868 size_t buf_left = sg_buf_end - sg_bufp;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001869
Martijn Coenen79802402017-02-03 14:40:51 -08001870 if (bp->length > buf_left) {
1871 binder_user_error("%d:%d got transaction with too large buffer\n",
1872 proc->pid, thread->pid);
1873 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07001874 return_error_param = -EINVAL;
1875 return_error_line = __LINE__;
Martijn Coenen79802402017-02-03 14:40:51 -08001876 goto err_bad_offset;
1877 }
1878 if (copy_from_user(sg_bufp,
1879 (const void __user *)(uintptr_t)
1880 bp->buffer, bp->length)) {
1881 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
1882 proc->pid, thread->pid);
Todd Kjos57ada2f2017-06-29 12:01:46 -07001883 return_error_param = -EFAULT;
Martijn Coenen79802402017-02-03 14:40:51 -08001884 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07001885 return_error_line = __LINE__;
Martijn Coenen79802402017-02-03 14:40:51 -08001886 goto err_copy_data_failed;
1887 }
1888 /* Fixup buffer pointer to target proc address space */
1889 bp->buffer = (uintptr_t)sg_bufp +
Todd Kjos19c98722017-06-29 12:01:40 -07001890 binder_alloc_get_user_buffer_offset(
1891 &target_proc->alloc);
Martijn Coenen79802402017-02-03 14:40:51 -08001892 sg_bufp += ALIGN(bp->length, sizeof(u64));
1893
1894 ret = binder_fixup_parent(t, thread, bp, off_start,
1895 offp - off_start,
1896 last_fixup_obj,
1897 last_fixup_min_off);
1898 if (ret < 0) {
1899 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07001900 return_error_param = ret;
1901 return_error_line = __LINE__;
Martijn Coenen79802402017-02-03 14:40:51 -08001902 goto err_translate_failed;
1903 }
1904 last_fixup_obj = bp;
1905 last_fixup_min_off = 0;
1906 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001907 default:
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01001908 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
Martijn Coenenfeba3902017-02-03 14:40:45 -08001909 proc->pid, thread->pid, hdr->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001910 return_error = BR_FAILED_REPLY;
Todd Kjos57ada2f2017-06-29 12:01:46 -07001911 return_error_param = -EINVAL;
1912 return_error_line = __LINE__;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001913 goto err_bad_object_type;
1914 }
1915 }
Todd Kjosccae6f62017-06-29 12:01:48 -07001916 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
1917 list_add_tail(&tcomplete->entry, &thread->todo);
1918
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001919 if (reply) {
Todd Kjos7a4408c2017-06-29 12:01:57 -07001920 if (target_thread->is_dead)
1921 goto err_dead_proc_or_thread;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001922 BUG_ON(t->buffer->async_transaction != 0);
1923 binder_pop_transaction(target_thread, in_reply_to);
Todd Kjosb6d282c2017-06-29 12:01:54 -07001924 binder_free_transaction(in_reply_to);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001925 } else if (!(t->flags & TF_ONE_WAY)) {
1926 BUG_ON(t->buffer->async_transaction != 0);
1927 t->need_reply = 1;
1928 t->from_parent = thread->transaction_stack;
1929 thread->transaction_stack = t;
Todd Kjos7a4408c2017-06-29 12:01:57 -07001930 if (target_proc->is_dead ||
1931 (target_thread && target_thread->is_dead)) {
1932 binder_pop_transaction(thread, t);
1933 goto err_dead_proc_or_thread;
1934 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001935 } else {
1936 BUG_ON(target_node == NULL);
1937 BUG_ON(t->buffer->async_transaction != 1);
1938 if (target_node->has_async_transaction) {
1939 target_list = &target_node->async_todo;
1940 target_wait = NULL;
1941 } else
1942 target_node->has_async_transaction = 1;
Todd Kjos7a4408c2017-06-29 12:01:57 -07001943 if (target_proc->is_dead ||
1944 (target_thread && target_thread->is_dead))
1945 goto err_dead_proc_or_thread;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001946 }
1947 t->work.type = BINDER_WORK_TRANSACTION;
1948 list_add_tail(&t->work.entry, target_list);
Riley Andrews00b40d62017-06-29 12:01:37 -07001949 if (target_wait) {
Todd Kjosccae6f62017-06-29 12:01:48 -07001950 if (reply || !(tr->flags & TF_ONE_WAY))
Riley Andrews00b40d62017-06-29 12:01:37 -07001951 wake_up_interruptible_sync(target_wait);
1952 else
1953 wake_up_interruptible(target_wait);
1954 }
Todd Kjos7a4408c2017-06-29 12:01:57 -07001955 if (target_thread)
1956 binder_thread_dec_tmpref(target_thread);
1957 binder_proc_dec_tmpref(target_proc);
Todd Kjosd99c7332017-06-29 12:01:53 -07001958 /*
1959 * write barrier to synchronize with initialization
1960 * of log entry
1961 */
1962 smp_wmb();
1963 WRITE_ONCE(e->debug_id_done, t_debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001964 return;
1965
Todd Kjos7a4408c2017-06-29 12:01:57 -07001966err_dead_proc_or_thread:
1967 return_error = BR_DEAD_REPLY;
1968 return_error_line = __LINE__;
Martijn Coenena056af42017-02-03 14:40:49 -08001969err_translate_failed:
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001970err_bad_object_type:
1971err_bad_offset:
Martijn Coenendef95c72017-02-03 14:40:52 -08001972err_bad_parent:
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001973err_copy_data_failed:
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07001974 trace_binder_transaction_failed_buffer_release(t->buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001975 binder_transaction_buffer_release(target_proc, t->buffer, offp);
Todd Kjoseb349832017-06-29 12:01:56 -07001976 target_node = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001977 t->buffer->transaction = NULL;
Todd Kjos19c98722017-06-29 12:01:40 -07001978 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001979err_binder_alloc_buf_failed:
1980 kfree(tcomplete);
1981 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
1982err_alloc_tcomplete_failed:
1983 kfree(t);
1984 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1985err_alloc_t_failed:
1986err_bad_call_stack:
1987err_empty_call_stack:
1988err_dead_binder:
1989err_invalid_target_handle:
1990err_no_context_mgr_node:
Todd Kjos7a4408c2017-06-29 12:01:57 -07001991 if (target_thread)
1992 binder_thread_dec_tmpref(target_thread);
1993 if (target_proc)
1994 binder_proc_dec_tmpref(target_proc);
Todd Kjoseb349832017-06-29 12:01:56 -07001995 if (target_node)
1996 binder_dec_node(target_node, 1, 0);
1997
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001998 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
Todd Kjos57ada2f2017-06-29 12:01:46 -07001999 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
2000 proc->pid, thread->pid, return_error, return_error_param,
2001 (u64)tr->data_size, (u64)tr->offsets_size,
2002 return_error_line);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002003
2004 {
2005 struct binder_transaction_log_entry *fe;
Seunghun Lee10f62862014-05-01 01:30:23 +09002006
Todd Kjos57ada2f2017-06-29 12:01:46 -07002007 e->return_error = return_error;
2008 e->return_error_param = return_error_param;
2009 e->return_error_line = return_error_line;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002010 fe = binder_transaction_log_add(&binder_transaction_log_failed);
2011 *fe = *e;
Todd Kjosd99c7332017-06-29 12:01:53 -07002012 /*
2013 * write barrier to synchronize with initialization
2014 * of log entry
2015 */
2016 smp_wmb();
2017 WRITE_ONCE(e->debug_id_done, t_debug_id);
2018 WRITE_ONCE(fe->debug_id_done, t_debug_id);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002019 }
2020
Todd Kjos26549d12017-06-29 12:01:55 -07002021 BUG_ON(thread->return_error.cmd != BR_OK);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002022 if (in_reply_to) {
Todd Kjos26549d12017-06-29 12:01:55 -07002023 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
2024 list_add_tail(&thread->return_error.work.entry,
2025 &thread->todo);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002026 binder_send_failed_reply(in_reply_to, return_error);
Todd Kjos26549d12017-06-29 12:01:55 -07002027 } else {
2028 thread->return_error.cmd = return_error;
2029 list_add_tail(&thread->return_error.work.entry,
2030 &thread->todo);
2031 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002032}
2033
Bojan Prtvarfb07ebc2013-09-02 08:18:40 +02002034static int binder_thread_write(struct binder_proc *proc,
2035 struct binder_thread *thread,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002036 binder_uintptr_t binder_buffer, size_t size,
2037 binder_size_t *consumed)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002038{
2039 uint32_t cmd;
Martijn Coenen342e5c92017-02-03 14:40:46 -08002040 struct binder_context *context = proc->context;
Arve Hjønnevågda498892014-02-21 14:40:26 -08002041 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002042 void __user *ptr = buffer + *consumed;
2043 void __user *end = buffer + size;
2044
Todd Kjos26549d12017-06-29 12:01:55 -07002045 while (ptr < end && thread->return_error.cmd == BR_OK) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002046 if (get_user(cmd, (uint32_t __user *)ptr))
2047 return -EFAULT;
2048 ptr += sizeof(uint32_t);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002049 trace_binder_command(cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002050 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
Badhri Jagan Sridharan0953c792017-06-29 12:01:44 -07002051 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
2052 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
2053 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002054 }
2055 switch (cmd) {
2056 case BC_INCREFS:
2057 case BC_ACQUIRE:
2058 case BC_RELEASE:
2059 case BC_DECREFS: {
2060 uint32_t target;
Todd Kjosc44b1232017-06-29 12:01:43 -07002061 struct binder_ref *ref = NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002062 const char *debug_string;
2063
2064 if (get_user(target, (uint32_t __user *)ptr))
2065 return -EFAULT;
Todd Kjosc44b1232017-06-29 12:01:43 -07002066
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002067 ptr += sizeof(uint32_t);
Todd Kjosc44b1232017-06-29 12:01:43 -07002068 if (target == 0 &&
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002069 (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
Todd Kjosc44b1232017-06-29 12:01:43 -07002070 struct binder_node *ctx_mgr_node;
2071
2072 mutex_lock(&context->context_mgr_node_lock);
2073 ctx_mgr_node = context->binder_context_mgr_node;
2074 if (ctx_mgr_node) {
2075 ref = binder_get_ref_for_node(proc,
2076 ctx_mgr_node);
2077 if (ref && ref->desc != target) {
2078 binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
2079 proc->pid, thread->pid,
2080 ref->desc);
2081 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002082 }
Todd Kjosc44b1232017-06-29 12:01:43 -07002083 mutex_unlock(&context->context_mgr_node_lock);
2084 }
2085 if (ref == NULL)
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02002086 ref = binder_get_ref(proc, target,
2087 cmd == BC_ACQUIRE ||
2088 cmd == BC_RELEASE);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002089 if (ref == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302090 binder_user_error("%d:%d refcount change on invalid ref %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002091 proc->pid, thread->pid, target);
2092 break;
2093 }
2094 switch (cmd) {
2095 case BC_INCREFS:
2096 debug_string = "IncRefs";
2097 binder_inc_ref(ref, 0, NULL);
2098 break;
2099 case BC_ACQUIRE:
2100 debug_string = "Acquire";
2101 binder_inc_ref(ref, 1, NULL);
2102 break;
2103 case BC_RELEASE:
2104 debug_string = "Release";
2105 binder_dec_ref(ref, 1);
2106 break;
2107 case BC_DECREFS:
2108 default:
2109 debug_string = "DecRefs";
2110 binder_dec_ref(ref, 0);
2111 break;
2112 }
2113 binder_debug(BINDER_DEBUG_USER_REFS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302114 "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002115 proc->pid, thread->pid, debug_string, ref->debug_id,
2116 ref->desc, ref->strong, ref->weak, ref->node->debug_id);
2117 break;
2118 }
2119 case BC_INCREFS_DONE:
2120 case BC_ACQUIRE_DONE: {
Arve Hjønnevågda498892014-02-21 14:40:26 -08002121 binder_uintptr_t node_ptr;
2122 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002123 struct binder_node *node;
2124
Arve Hjønnevågda498892014-02-21 14:40:26 -08002125 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002126 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08002127 ptr += sizeof(binder_uintptr_t);
2128 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002129 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08002130 ptr += sizeof(binder_uintptr_t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002131 node = binder_get_node(proc, node_ptr);
2132 if (node == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08002133 binder_user_error("%d:%d %s u%016llx no match\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002134 proc->pid, thread->pid,
2135 cmd == BC_INCREFS_DONE ?
2136 "BC_INCREFS_DONE" :
2137 "BC_ACQUIRE_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08002138 (u64)node_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002139 break;
2140 }
2141 if (cookie != node->cookie) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08002142 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002143 proc->pid, thread->pid,
2144 cmd == BC_INCREFS_DONE ?
2145 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08002146 (u64)node_ptr, node->debug_id,
2147 (u64)cookie, (u64)node->cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002148 break;
2149 }
2150 if (cmd == BC_ACQUIRE_DONE) {
2151 if (node->pending_strong_ref == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302152 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002153 proc->pid, thread->pid,
2154 node->debug_id);
2155 break;
2156 }
2157 node->pending_strong_ref = 0;
2158 } else {
2159 if (node->pending_weak_ref == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302160 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002161 proc->pid, thread->pid,
2162 node->debug_id);
2163 break;
2164 }
2165 node->pending_weak_ref = 0;
2166 }
2167 binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
2168 binder_debug(BINDER_DEBUG_USER_REFS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302169 "%d:%d %s node %d ls %d lw %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002170 proc->pid, thread->pid,
2171 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
2172 node->debug_id, node->local_strong_refs, node->local_weak_refs);
2173 break;
2174 }
2175 case BC_ATTEMPT_ACQUIRE:
Anmol Sarma56b468f2012-10-30 22:35:43 +05302176 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002177 return -EINVAL;
2178 case BC_ACQUIRE_RESULT:
Anmol Sarma56b468f2012-10-30 22:35:43 +05302179 pr_err("BC_ACQUIRE_RESULT not supported\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002180 return -EINVAL;
2181
2182 case BC_FREE_BUFFER: {
Arve Hjønnevågda498892014-02-21 14:40:26 -08002183 binder_uintptr_t data_ptr;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002184 struct binder_buffer *buffer;
2185
Arve Hjønnevågda498892014-02-21 14:40:26 -08002186 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002187 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08002188 ptr += sizeof(binder_uintptr_t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002189
Todd Kjos53d311cf2017-06-29 12:01:51 -07002190 buffer = binder_alloc_prepare_to_free(&proc->alloc,
2191 data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002192 if (buffer == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08002193 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
2194 proc->pid, thread->pid, (u64)data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002195 break;
2196 }
2197 if (!buffer->allow_user_free) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08002198 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
2199 proc->pid, thread->pid, (u64)data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002200 break;
2201 }
2202 binder_debug(BINDER_DEBUG_FREE_BUFFER,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002203 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
2204 proc->pid, thread->pid, (u64)data_ptr,
2205 buffer->debug_id,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002206 buffer->transaction ? "active" : "finished");
2207
2208 if (buffer->transaction) {
2209 buffer->transaction->buffer = NULL;
2210 buffer->transaction = NULL;
2211 }
2212 if (buffer->async_transaction && buffer->target_node) {
2213 BUG_ON(!buffer->target_node->has_async_transaction);
2214 if (list_empty(&buffer->target_node->async_todo))
2215 buffer->target_node->has_async_transaction = 0;
2216 else
2217 list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
2218 }
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002219 trace_binder_transaction_buffer_release(buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002220 binder_transaction_buffer_release(proc, buffer, NULL);
Todd Kjos19c98722017-06-29 12:01:40 -07002221 binder_alloc_free_buf(&proc->alloc, buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002222 break;
2223 }
2224
Martijn Coenen79802402017-02-03 14:40:51 -08002225 case BC_TRANSACTION_SG:
2226 case BC_REPLY_SG: {
2227 struct binder_transaction_data_sg tr;
2228
2229 if (copy_from_user(&tr, ptr, sizeof(tr)))
2230 return -EFAULT;
2231 ptr += sizeof(tr);
2232 binder_transaction(proc, thread, &tr.transaction_data,
2233 cmd == BC_REPLY_SG, tr.buffers_size);
2234 break;
2235 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002236 case BC_TRANSACTION:
2237 case BC_REPLY: {
2238 struct binder_transaction_data tr;
2239
2240 if (copy_from_user(&tr, ptr, sizeof(tr)))
2241 return -EFAULT;
2242 ptr += sizeof(tr);
Martijn Coenen4bfac802017-02-03 14:40:50 -08002243 binder_transaction(proc, thread, &tr,
2244 cmd == BC_REPLY, 0);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002245 break;
2246 }
2247
2248 case BC_REGISTER_LOOPER:
2249 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302250 "%d:%d BC_REGISTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002251 proc->pid, thread->pid);
2252 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
2253 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05302254 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002255 proc->pid, thread->pid);
2256 } else if (proc->requested_threads == 0) {
2257 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05302258 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002259 proc->pid, thread->pid);
2260 } else {
2261 proc->requested_threads--;
2262 proc->requested_threads_started++;
2263 }
2264 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
2265 break;
2266 case BC_ENTER_LOOPER:
2267 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302268 "%d:%d BC_ENTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002269 proc->pid, thread->pid);
2270 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
2271 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05302272 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002273 proc->pid, thread->pid);
2274 }
2275 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
2276 break;
2277 case BC_EXIT_LOOPER:
2278 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302279 "%d:%d BC_EXIT_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002280 proc->pid, thread->pid);
2281 thread->looper |= BINDER_LOOPER_STATE_EXITED;
2282 break;
2283
2284 case BC_REQUEST_DEATH_NOTIFICATION:
2285 case BC_CLEAR_DEATH_NOTIFICATION: {
2286 uint32_t target;
Arve Hjønnevågda498892014-02-21 14:40:26 -08002287 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002288 struct binder_ref *ref;
2289 struct binder_ref_death *death;
2290
2291 if (get_user(target, (uint32_t __user *)ptr))
2292 return -EFAULT;
2293 ptr += sizeof(uint32_t);
Arve Hjønnevågda498892014-02-21 14:40:26 -08002294 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002295 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08002296 ptr += sizeof(binder_uintptr_t);
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02002297 ref = binder_get_ref(proc, target, false);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002298 if (ref == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302299 binder_user_error("%d:%d %s invalid ref %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002300 proc->pid, thread->pid,
2301 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2302 "BC_REQUEST_DEATH_NOTIFICATION" :
2303 "BC_CLEAR_DEATH_NOTIFICATION",
2304 target);
2305 break;
2306 }
2307
2308 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002309 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002310 proc->pid, thread->pid,
2311 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2312 "BC_REQUEST_DEATH_NOTIFICATION" :
2313 "BC_CLEAR_DEATH_NOTIFICATION",
Arve Hjønnevågda498892014-02-21 14:40:26 -08002314 (u64)cookie, ref->debug_id, ref->desc,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002315 ref->strong, ref->weak, ref->node->debug_id);
2316
2317 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
2318 if (ref->death) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302319 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002320 proc->pid, thread->pid);
2321 break;
2322 }
2323 death = kzalloc(sizeof(*death), GFP_KERNEL);
2324 if (death == NULL) {
Todd Kjos26549d12017-06-29 12:01:55 -07002325 WARN_ON(thread->return_error.cmd !=
2326 BR_OK);
2327 thread->return_error.cmd = BR_ERROR;
2328 list_add_tail(
2329 &thread->return_error.work.entry,
2330 &thread->todo);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002331 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302332 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002333 proc->pid, thread->pid);
2334 break;
2335 }
2336 binder_stats_created(BINDER_STAT_DEATH);
2337 INIT_LIST_HEAD(&death->work.entry);
2338 death->cookie = cookie;
2339 ref->death = death;
2340 if (ref->node->proc == NULL) {
2341 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
2342 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2343 list_add_tail(&ref->death->work.entry, &thread->todo);
2344 } else {
2345 list_add_tail(&ref->death->work.entry, &proc->todo);
2346 wake_up_interruptible(&proc->wait);
2347 }
2348 }
2349 } else {
2350 if (ref->death == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302351 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002352 proc->pid, thread->pid);
2353 break;
2354 }
2355 death = ref->death;
2356 if (death->cookie != cookie) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08002357 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002358 proc->pid, thread->pid,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002359 (u64)death->cookie,
2360 (u64)cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002361 break;
2362 }
2363 ref->death = NULL;
2364 if (list_empty(&death->work.entry)) {
2365 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2366 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2367 list_add_tail(&death->work.entry, &thread->todo);
2368 } else {
2369 list_add_tail(&death->work.entry, &proc->todo);
2370 wake_up_interruptible(&proc->wait);
2371 }
2372 } else {
2373 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
2374 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
2375 }
2376 }
2377 } break;
2378 case BC_DEAD_BINDER_DONE: {
2379 struct binder_work *w;
Arve Hjønnevågda498892014-02-21 14:40:26 -08002380 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002381 struct binder_ref_death *death = NULL;
Seunghun Lee10f62862014-05-01 01:30:23 +09002382
Arve Hjønnevågda498892014-02-21 14:40:26 -08002383 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002384 return -EFAULT;
2385
Lisa Du7a64cd82016-02-17 09:32:52 +08002386 ptr += sizeof(cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002387 list_for_each_entry(w, &proc->delivered_death, entry) {
2388 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
Seunghun Lee10f62862014-05-01 01:30:23 +09002389
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002390 if (tmp_death->cookie == cookie) {
2391 death = tmp_death;
2392 break;
2393 }
2394 }
2395 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002396 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
2397 proc->pid, thread->pid, (u64)cookie,
2398 death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002399 if (death == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08002400 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
2401 proc->pid, thread->pid, (u64)cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002402 break;
2403 }
2404
2405 list_del_init(&death->work.entry);
2406 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
2407 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2408 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2409 list_add_tail(&death->work.entry, &thread->todo);
2410 } else {
2411 list_add_tail(&death->work.entry, &proc->todo);
2412 wake_up_interruptible(&proc->wait);
2413 }
2414 }
2415 } break;
2416
2417 default:
Anmol Sarma56b468f2012-10-30 22:35:43 +05302418 pr_err("%d:%d unknown command %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002419 proc->pid, thread->pid, cmd);
2420 return -EINVAL;
2421 }
2422 *consumed = ptr - buffer;
2423 }
2424 return 0;
2425}
2426
Bojan Prtvarfb07ebc2013-09-02 08:18:40 +02002427static void binder_stat_br(struct binder_proc *proc,
2428 struct binder_thread *thread, uint32_t cmd)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002429{
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002430 trace_binder_return(cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002431 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
Badhri Jagan Sridharan0953c792017-06-29 12:01:44 -07002432 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
2433 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
2434 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002435 }
2436}
2437
2438static int binder_has_proc_work(struct binder_proc *proc,
2439 struct binder_thread *thread)
2440{
Todd Kjos08dabce2017-06-29 12:01:49 -07002441 return !list_empty(&proc->todo) || thread->looper_need_return;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002442}
2443
2444static int binder_has_thread_work(struct binder_thread *thread)
2445{
Todd Kjos26549d12017-06-29 12:01:55 -07002446 return !list_empty(&thread->todo) || thread->looper_need_return;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002447}
2448
Todd Kjos26b47d82017-06-29 12:01:47 -07002449static int binder_put_node_cmd(struct binder_proc *proc,
2450 struct binder_thread *thread,
2451 void __user **ptrp,
2452 binder_uintptr_t node_ptr,
2453 binder_uintptr_t node_cookie,
2454 int node_debug_id,
2455 uint32_t cmd, const char *cmd_name)
2456{
2457 void __user *ptr = *ptrp;
2458
2459 if (put_user(cmd, (uint32_t __user *)ptr))
2460 return -EFAULT;
2461 ptr += sizeof(uint32_t);
2462
2463 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
2464 return -EFAULT;
2465 ptr += sizeof(binder_uintptr_t);
2466
2467 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
2468 return -EFAULT;
2469 ptr += sizeof(binder_uintptr_t);
2470
2471 binder_stat_br(proc, thread, cmd);
2472 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
2473 proc->pid, thread->pid, cmd_name, node_debug_id,
2474 (u64)node_ptr, (u64)node_cookie);
2475
2476 *ptrp = ptr;
2477 return 0;
2478}
2479
/*
 * binder_thread_read() - fill the userspace read buffer with pending work.
 * @proc:          process that owns @thread
 * @thread:        thread doing the read
 * @binder_buffer: userspace address of the read buffer
 * @size:          size of the read buffer in bytes
 * @consumed:      in/out count of bytes already written into the buffer
 * @non_block:     non-zero for a non-blocking read
 *
 * Called with the global binder lock held; the lock is dropped around
 * the blocking wait and re-taken afterwards.  Returns 0 on success,
 * -EFAULT on a failed user copy, -EAGAIN when non-blocking with no
 * work, or the error from an interrupted wait.
 */
static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	/* a fresh read always starts with a BR_NOOP so userspace can
	 * treat the stream as a sequence of 4-byte-aligned commands */
	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	/* wait on proc->todo only when this thread has nothing of its own */
	wait_for_proc_work = thread->transaction_stack == NULL &&
				list_empty(&thread->todo);

	thread->looper |= BINDER_LOOPER_STATE_WAITING;
	if (wait_for_proc_work)
		proc->ready_threads++;

	/* drop the global lock while (possibly) sleeping */
	binder_unlock(__func__);

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !list_empty(&thread->todo));
	if (wait_for_proc_work) {
		/* a thread may only wait for process work after it has
		 * registered itself as a looper */
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
		if (non_block) {
			if (!binder_has_proc_work(proc, thread))
				ret = -EAGAIN;
		} else
			ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
	} else {
		if (non_block) {
			if (!binder_has_thread_work(thread))
				ret = -EAGAIN;
		} else
			ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
	}

	binder_lock(__func__);

	if (wait_for_proc_work)
		proc->ready_threads--;
	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	/* drain work items until the buffer is full or the lists are empty */
	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w;
		struct binder_transaction *t = NULL;
		struct binder_thread *t_from;

		if (!list_empty(&thread->todo)) {
			w = list_first_entry(&thread->todo, struct binder_work,
					     entry);
		} else if (!list_empty(&proc->todo) && wait_for_proc_work) {
			w = list_first_entry(&proc->todo, struct binder_work,
					     entry);
		} else {
			/* no data added */
			/* only the initial BR_NOOP (4 bytes) was written:
			 * go back to sleep instead of returning nothing */
			if (ptr - buffer == 4 && !thread->looper_need_return)
				goto retry;
			break;
		}

		/* stop when the remaining space cannot hold one more
		 * command word plus a transaction_data */
		if (end - ptr < sizeof(tr) + 4)
			break;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			WARN_ON(e->cmd == BR_OK);
			if (put_user(e->cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			e->cmd = BR_OK;
			ptr += sizeof(uint32_t);

			/* NOTE(review): 'cmd' has not been assigned in this
			 * iteration, so this passes an uninitialized value to
			 * binder_stat_br(); presumably e->cmd (saved before
			 * the reset to BR_OK above) was intended — confirm
			 * against upstream. */
			binder_stat_br(proc, thread, cmd);
			list_del(&w->entry);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);

			list_del(&w->entry);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			int strong, weak;
			/* snapshot node fields: the node may be freed below */
			binder_uintptr_t node_ptr = node->ptr;
			binder_uintptr_t node_cookie = node->cookie;
			int node_debug_id = node->debug_id;
			int has_weak_ref;
			int has_strong_ref;
			void __user *orig_ptr = ptr;

			BUG_ON(proc != node->proc);
			strong = node->internal_strong_refs ||
					node->local_strong_refs;
			weak = !hlist_empty(&node->refs) ||
					node->local_weak_refs || strong;
			has_strong_ref = node->has_strong_ref;
			has_weak_ref = node->has_weak_ref;

			/* update bookkeeping before copying to userspace so
			 * the node state is consistent even on -EFAULT */
			if (weak && !has_weak_ref) {
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			}
			if (strong && !has_strong_ref) {
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			}
			if (!strong && has_strong_ref)
				node->has_strong_ref = 0;
			if (!weak && has_weak_ref)
				node->has_weak_ref = 0;
			list_del(&w->entry);

			/* no refs left at all: destroy the node */
			if (!weak && !strong) {
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx deleted\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
				rb_erase(&node->rb_node, &proc->nodes);
				kfree(node);
				binder_stats_deleted(BINDER_STAT_NODE);
			}
			/* emit the userspace ref-count commands implied by
			 * the state transitions computed above */
			if (weak && !has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_INCREFS, "BR_INCREFS");
			if (!ret && strong && !has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_ACQUIRE, "BR_ACQUIRE");
			if (!ret && !strong && has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_RELEASE, "BR_RELEASE");
			if (!ret && !weak && has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_DECREFS, "BR_DECREFS");
			if (orig_ptr == ptr)
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
			if (ret)
				return ret;
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(death->cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				      proc->pid, thread->pid,
				      cmd == BR_DEAD_BINDER ?
				      "BR_DEAD_BINDER" :
				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				      (u64)death->cookie);

			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				/* clear-notification is fully delivered:
				 * the death struct can be freed now */
				list_del(&w->entry);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else
				/* keep until userspace acks with
				 * BC_DEAD_BINDER_DONE */
				list_move(&w->entry, &proc->delivered_death);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		}

		/* only BINDER_WORK_TRANSACTION sets t; everything else
		 * was fully handled in the switch */
		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;

			tr.target.ptr = target_node->ptr;
			tr.cookie =  target_node->cookie;
			/* inherit sender priority for synchronous calls,
			 * bounded below by the node's min_priority */
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			/* no target node means this is a reply */
			tr.target.ptr = 0;
			tr.cookie = 0;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		t_from = binder_get_txn_from(t);
		if (t_from) {
			struct task_struct *sender = t_from->proc->tsk;

			tr.sender_pid = task_tgid_nr_ns(sender,
							task_active_pid_ns(current));
		} else {
			tr.sender_pid = 0;
		}

		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		/* translate the kernel buffer address into the receiver's
		 * mmap'ed view of the same pages */
		tr.data.ptr.buffer = (binder_uintptr_t)
			((uintptr_t)t->buffer->data +
			binder_alloc_get_user_buffer_offset(&proc->alloc));
		tr.data.ptr.offsets = tr.data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					    sizeof(void *));

		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);
			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr))) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);
			return -EFAULT;
		}
		ptr += sizeof(tr);

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
			     "BR_REPLY",
			     t->debug_id, t_from ? t_from->proc->pid : 0,
			     t_from ? t_from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);

		if (t_from)
			binder_thread_dec_tmpref(t_from);
		list_del(&t->work.entry);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			/* synchronous call: push onto this thread's stack
			 * so the reply can be routed back */
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
		} else {
			binder_free_transaction(t);
		}
		/* one transaction per read: return to userspace */
		break;
	}

done:

	*consumed = ptr - buffer;
	/* ask userspace to spawn another looper thread when no thread is
	 * ready or requested and the configured max is not yet reached */
	if (proc->requested_threads + proc->ready_threads == 0 &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
	     /*spawn a new thread if we leave this out */) {
		proc->requested_threads++;
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		/* overwrite the BR_NOOP at the start of the buffer */
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	}
	return 0;
}
2812
/*
 * binder_release_work() - drain and dispose of a pending work list.
 * @list: the todo list (thread or process) being torn down
 *
 * Pops every work item and disposes of it according to its type:
 * undelivered transactions either get a BR_DEAD_REPLY sent back to
 * their sender (synchronous ones with a target node) or are freed;
 * transaction-complete markers are freed; pending death notifications
 * are freed.  BINDER_WORK_RETURN_ERROR items are only logged — the
 * binder_error they are embedded in is owned by the thread struct and
 * must not be kfree'd here.
 */
static void binder_release_work(struct list_head *list)
{
	struct binder_work *w;

	while (!list_empty(list)) {
		w = list_first_entry(list, struct binder_work, entry);
		list_del_init(&w->entry);
		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);
			if (t->buffer->target_node &&
			    !(t->flags & TF_ONE_WAY)) {
				/* synchronous: the sender is still waiting,
				 * tell it the target is dead */
				binder_send_failed_reply(t, BR_DEAD_REPLY);
			} else {
				binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
					"undelivered transaction %d\n",
					t->debug_id);
				binder_free_transaction(t);
			}
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_ERROR: %u\n",
				e->cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered death notification, %016llx\n",
				(u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		default:
			/* unknown type: leaking is safer than freeing the
			 * wrong container */
			pr_err("unexpected work type, %d, not freed\n",
			       w->type);
			break;
		}
	}

}
2868
2869static struct binder_thread *binder_get_thread(struct binder_proc *proc)
2870{
2871 struct binder_thread *thread = NULL;
2872 struct rb_node *parent = NULL;
2873 struct rb_node **p = &proc->threads.rb_node;
2874
2875 while (*p) {
2876 parent = *p;
2877 thread = rb_entry(parent, struct binder_thread, rb_node);
2878
2879 if (current->pid < thread->pid)
2880 p = &(*p)->rb_left;
2881 else if (current->pid > thread->pid)
2882 p = &(*p)->rb_right;
2883 else
2884 break;
2885 }
2886 if (*p == NULL) {
2887 thread = kzalloc(sizeof(*thread), GFP_KERNEL);
2888 if (thread == NULL)
2889 return NULL;
2890 binder_stats_created(BINDER_STAT_THREAD);
2891 thread->proc = proc;
2892 thread->pid = current->pid;
Todd Kjos7a4408c2017-06-29 12:01:57 -07002893 atomic_set(&thread->tmp_ref, 0);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002894 init_waitqueue_head(&thread->wait);
2895 INIT_LIST_HEAD(&thread->todo);
2896 rb_link_node(&thread->rb_node, parent, p);
2897 rb_insert_color(&thread->rb_node, &proc->threads);
Todd Kjos08dabce2017-06-29 12:01:49 -07002898 thread->looper_need_return = true;
Todd Kjos26549d12017-06-29 12:01:55 -07002899 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
2900 thread->return_error.cmd = BR_OK;
2901 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
2902 thread->reply_error.cmd = BR_OK;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002903 }
2904 return thread;
2905}
2906
/*
 * binder_free_proc() - final teardown of a binder_proc.
 * @proc: process to free; its todo and delivered_death lists must
 *        already be empty (BUG otherwise)
 *
 * Releases the allocator state, drops the task reference taken when
 * the proc was created, and frees the struct.
 */
static void binder_free_proc(struct binder_proc *proc)
{
	BUG_ON(!list_empty(&proc->todo));
	BUG_ON(!list_empty(&proc->delivered_death));
	binder_alloc_deferred_release(&proc->alloc);
	put_task_struct(proc->tsk);
	binder_stats_deleted(BINDER_STAT_PROC);
	kfree(proc);
}
2916
/*
 * binder_free_thread() - final teardown of a binder_thread.
 * @thread: thread to free; its todo list must already be empty
 *          (BUG otherwise)
 *
 * Drops the temporary reference on the owning proc that was taken in
 * binder_thread_release(), then frees the struct.
 */
static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	kfree(thread);
}
2924
/*
 * binder_thread_release() - detach a thread from its proc and unwind
 *                           its transaction stack.
 * @proc:   owning process
 * @thread: thread being released
 *
 * Removes the thread from proc->threads, marks it dead, and walks its
 * transaction stack severing each transaction's link to this thread
 * (hand-over-hand locking: each transaction's spinlock is taken before
 * the previous one is dropped).  If the top of the stack is a
 * transaction whose reply this thread owed, a BR_DEAD_REPLY is sent.
 * Remaining todo work is drained and the temporary thread ref is
 * dropped (which frees the thread once all refs are gone).
 *
 * Returns the number of transactions that were still active.
 */
static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;
	struct binder_transaction *last_t = NULL;

	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
	proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
	atomic_inc(&thread->tmp_ref);
	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	}
	thread->is_dead = true;

	while (t) {
		last_t = t;
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			      proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			/* incoming transaction: detach target side and
			 * orphan its buffer */
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			/* outgoing transaction: detach sender side */
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
		/* hand-over-hand: lock the next before dropping the last */
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
	}

	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(&thread->todo);
	binder_thread_dec_tmpref(thread);
	return active_transactions;
}
2987
2988static unsigned int binder_poll(struct file *filp,
2989 struct poll_table_struct *wait)
2990{
2991 struct binder_proc *proc = filp->private_data;
2992 struct binder_thread *thread = NULL;
2993 int wait_for_proc_work;
2994
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002995 binder_lock(__func__);
2996
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002997 thread = binder_get_thread(proc);
2998
2999 wait_for_proc_work = thread->transaction_stack == NULL &&
Todd Kjos26549d12017-06-29 12:01:55 -07003000 list_empty(&thread->todo);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003001
3002 binder_unlock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003003
3004 if (wait_for_proc_work) {
3005 if (binder_has_proc_work(proc, thread))
3006 return POLLIN;
3007 poll_wait(filp, &proc->wait, wait);
3008 if (binder_has_proc_work(proc, thread))
3009 return POLLIN;
3010 } else {
3011 if (binder_has_thread_work(thread))
3012 return POLLIN;
3013 poll_wait(filp, &thread->wait, wait);
3014 if (binder_has_thread_work(thread))
3015 return POLLIN;
3016 }
3017 return 0;
3018}
3019
/**
 * binder_ioctl_write_read() - handle the BINDER_WRITE_READ ioctl
 * @filp:   binder file
 * @cmd:    ioctl command (used only for its size field)
 * @arg:    userspace pointer to a struct binder_write_read
 * @thread: calling thread's binder_thread
 *
 * Copies in the binder_write_read descriptor, processes the write buffer
 * (commands from userspace) and then the read buffer (work delivered to
 * userspace), and copies the updated consumed counters back out — even on
 * a partial failure, so userspace can tell how far processing got.
 *
 * Return: 0 on success, -EINVAL on bad size, -EFAULT on copy failure, or
 *         the error from binder_thread_write()/binder_thread_read().
 */
static int binder_ioctl_write_read(struct file *filp,
				unsigned int cmd, unsigned long arg,
				struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			/*
			 * write failed: report zero read progress but still
			 * return the updated write_consumed to userspace
			 */
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		/* other threads may be able to consume remaining proc work */
		if (!list_empty(&proc->todo))
			wake_up_interruptible(&proc->wait);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}
3083
/**
 * binder_ioctl_set_ctx_mgr() - handle BINDER_SET_CONTEXT_MGR
 * @filp: binder file of the process claiming context-manager status
 *
 * Installs the calling process as the context manager (node handle 0) for
 * this binder context. Only one manager may exist per context; a second
 * attempt fails with -EBUSY. If a manager uid was previously recorded,
 * only that same euid may claim the role again. The whole operation runs
 * under context->context_mgr_node_lock.
 *
 * Return: 0 on success, -EBUSY/-EPERM/-ENOMEM or a security-hook error.
 */
static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;
	struct binder_node *new_node;
	kuid_t curr_euid = current_euid();

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	/* LSM (e.g. SELinux) gets a veto before any state changes */
	ret = security_binder_set_context_mgr(proc->tsk);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		/* first claimant: record its euid for future checks */
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, 0, 0);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
	/* pin the node so it cannot go away while it is the manager */
	new_node->local_weak_refs++;
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node;
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}
3127
/**
 * binder_ioctl() - top-level ioctl dispatcher for the binder device
 * @filp: binder file
 * @cmd:  ioctl command
 * @arg:  userspace argument pointer
 *
 * Blocks while a user-error freeze is active (binder_stop_on_user_error),
 * then dispatches under the global binder lock. On BINDER_THREAD_EXIT the
 * thread is released, so @thread is NULLed to skip the looper_need_return
 * reset at the err label.
 */
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	trace_binder_ioctl(cmd, arg);

	/* hold callers while a debug "stop on user error" freeze is active */
	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	binder_lock(__func__);
	/* allocates the per-thread state on first ioctl from this thread */
	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS:
		if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		break;
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_thread_release(proc, thread);
		/* thread is gone; don't touch it at the err label below */
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper_need_return = false;
	binder_unlock(__func__);
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}
3205
/**
 * binder_vma_open() - vm_ops open hook for the binder mapping
 * @vma: the binder memory area
 *
 * Only logs the event; binder_mmap() sets VM_DONTCOPY so the mapping is
 * not duplicated across fork, and no per-open state is needed here.
 */
static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}
3216
/**
 * binder_vma_close() - vm_ops close hook for the binder mapping
 * @vma: the binder memory area being unmapped
 *
 * Notifies the allocator that the vma is going away and defers the
 * put_files_struct to the workqueue (we may be in a context where it
 * cannot be dropped directly).
 */
static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}
3229
/*
 * Page faults on the binder mapping are never legitimate: pages are
 * inserted explicitly by the allocator, so any fault means userspace
 * touched an unbacked address — deliver SIGBUS.
 */
static int binder_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
3234
/* vm operations for the userspace mapping created by binder_mmap() */
static const struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};
3240
/**
 * binder_mmap() - map the binder buffer area into the caller
 * @filp: binder file
 * @vma:  vma describing the requested mapping
 *
 * Only the thread-group leader that opened the device may mmap. The
 * mapping is clamped to 4 MB, must not be writable (WRITE/MAYWRITE are in
 * FORBIDDEN_MMAP_FLAGS), and is marked VM_DONTCOPY so it doesn't survive
 * fork. Actual page bookkeeping is delegated to binder_alloc.
 *
 * Return: 0 on success, -EINVAL/-EPERM or a binder_alloc error.
 */
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;

	if (proc->tsk != current->group_leader)
		return -EINVAL;

	/* silently clamp oversized requests to the 4 MB maximum */
	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     __func__, proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
	if (ret)
		return ret;
	/* released via BINDER_DEFERRED_PUT_FILES when the vma closes */
	proc->files = get_files_struct(current);
	return 0;

err_bad_arg:
	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
3279
/**
 * binder_open() - create per-process binder state on open(2)
 * @nodp: inode of the binder device (unused)
 * @filp: new file; private_data initially points at the miscdevice and is
 *        replaced with the new binder_proc
 *
 * Allocates and initializes a binder_proc pinned to the thread-group
 * leader, links it into the global binder_procs list, and creates its
 * debugfs entry.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;
	struct binder_device *binder_dev;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	/* pin the group leader's task_struct; dropped when proc is freed */
	get_task_struct(current->group_leader);
	proc->tsk = current->group_leader;
	INIT_LIST_HEAD(&proc->todo);
	init_waitqueue_head(&proc->wait);
	proc->default_priority = task_nice(current);
	/* recover the binder_device this misc device node belongs to */
	binder_dev = container_of(filp->private_data, struct binder_device,
				  miscdev);
	proc->context = &binder_dev->context;
	binder_alloc_init(&proc->alloc);

	binder_lock(__func__);

	binder_stats_created(BINDER_STAT_PROC);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	filp->private_data = proc;

	binder_unlock(__func__);

	mutex_lock(&binder_procs_lock);
	hlist_add_head(&proc->proc_node, &binder_procs);
	mutex_unlock(&binder_procs_lock);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts, so
		 * this will fail if the process tries to open the driver
		 * again with a different context. The printing code will
		 * anyway print all contexts that a given PID has, so this
		 * is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&binder_proc_fops);
	}

	return 0;
}
3333
/**
 * binder_flush() - flush handler; kicks waiting threads out of the driver
 * @filp: binder file
 * @id:   owner (unused)
 *
 * The actual wakeups happen in binder_deferred_flush() on the workqueue.
 */
static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}
3342
/**
 * binder_deferred_flush() - deferred part of binder_flush()
 * @proc: process whose threads should return to userspace
 *
 * Marks every thread with looper_need_return so it exits its read loop,
 * and wakes all threads currently blocked in the driver (both the
 * per-thread waits and the shared proc wait queue).
 */
static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper_need_return = true;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	wake_up_interruptible_all(&proc->wait);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}
3363
/**
 * binder_release() - release handler for the binder fd
 * @nodp: inode (unused)
 * @filp: file being released
 *
 * Removes the debugfs entry immediately; the heavyweight teardown runs in
 * binder_deferred_release() on the workqueue.
 */
static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);
	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}
3373
/**
 * binder_node_release() - release one node of a dying process
 * @node: node being released
 * @refs: running count of incoming references (accumulated by caller)
 *
 * If nothing references the node it is freed outright. Otherwise it is
 * detached from the proc, moved onto the global dead-nodes list, and a
 * BINDER_WORK_DEAD_BINDER item is queued to every process that registered
 * a death notification on it.
 *
 * Return: @refs plus the number of references this node still had.
 */
static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;

	list_del_init(&node->work.entry);
	binder_release_work(&node->async_todo);

	if (hlist_empty(&node->refs)) {
		/* nobody holds a ref: the node can simply be freed */
		kfree(node);
		binder_stats_deleted(BINDER_STAT_NODE);

		return refs;
	}

	/* node outlives its proc: mark it dead and orphan the local refs */
	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;

	spin_lock(&binder_dead_nodes_lock);
	hlist_add_head(&node->dead_node, &binder_dead_nodes);
	spin_unlock(&binder_dead_nodes_lock);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;

		if (!ref->death)
			continue;

		death++;

		/* deliver the death notification to the ref holder */
		if (list_empty(&ref->death->work.entry)) {
			ref->death->work.type = BINDER_WORK_DEAD_BINDER;
			list_add_tail(&ref->death->work.entry,
				      &ref->proc->todo);
			wake_up_interruptible(&ref->proc->wait);
		} else
			BUG();	/* death work must not be queued twice */
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);

	return refs;
}
3420
/**
 * binder_deferred_release() - deferred teardown of a binder_proc
 * @proc: process whose fd was released
 *
 * Runs on the workqueue after binder_release(). Unlinks the proc from the
 * global list, clears context-manager status if this proc held it, marks
 * the proc dead, then releases every thread, node, and outgoing ref, and
 * finally all still-queued work. The proc itself is freed by the last
 * binder_proc_dec_tmpref().
 */
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

	/* files must already have been put via BINDER_DEFERRED_PUT_FILES */
	BUG_ON(proc->files);

	mutex_lock(&binder_procs_lock);
	hlist_del(&proc->proc_node);
	mutex_unlock(&binder_procs_lock);

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}
	mutex_unlock(&context->context_mgr_node_lock);
	/*
	 * Make sure proc stays alive after we
	 * remove all the threads
	 */
	proc->tmp_ref++;

	proc->is_dead = true;
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		threads++;
		/* erases the thread from proc->threads as a side effect */
		active_transactions += binder_thread_release(proc, thread);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		rb_erase(&node->rb_node, &proc->nodes);
		incoming_refs = binder_node_release(node, incoming_refs);
	}

	outgoing_refs = 0;
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_delete_ref(ref);
	}

	binder_release_work(&proc->todo);
	binder_release_work(&proc->delivered_death);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions);

	/* drop the tmp_ref taken above; may free proc */
	binder_proc_dec_tmpref(proc);
}
3489
/**
 * binder_deferred_func() - workqueue runner for deferred binder work
 * @work: the global binder_deferred_work item
 *
 * Drains binder_deferred_list one proc at a time, handling whichever
 * BINDER_DEFERRED_* bits were accumulated for it. put_files_struct() is
 * called outside the global binder lock to avoid lock nesting with the
 * files code.
 */
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;

	int defer;

	do {
		binder_lock(__func__);
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			/* consume all pending bits for this proc at once */
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			files = proc->files;
			if (files)
				proc->files = NULL;
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		binder_unlock(__func__);
		/* dropped unlocked; see function comment */
		if (files)
			put_files_struct(files);
	} while (proc);
}
/* single work item that drains binder_deferred_list */
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
3531
/**
 * binder_defer_work() - queue deferred work bits for a proc
 * @proc:  target process
 * @defer: BINDER_DEFERRED_* bit(s) to add
 *
 * Bits accumulate in proc->deferred_work; the proc is added to the global
 * deferred list (and the work item scheduled) only if it isn't queued
 * already.
 */
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
				&binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}
3544
/**
 * print_binder_transaction() - dump one transaction to a seq_file
 * @m:      output seq_file (debugfs)
 * @prefix: indentation/label prefix for the line
 * @t:      transaction to print
 *
 * NOTE(review): t->lock protects only the first seq_printf; t->buffer is
 * dereferenced after the unlock. Looks racy against a concurrent buffer
 * release — upstream later moved this under the proc lock; confirm
 * whether a caller-held lock makes it safe here.
 */
static void print_binder_transaction(struct seq_file *m, const char *prefix,
				     struct binder_transaction *t)
{
	spin_lock(&t->lock);
	seq_printf(m,
		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   t->to_proc ? t->to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority, t->need_reply);
	spin_unlock(&t->lock);

	if (t->buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (t->buffer->target_node)
		seq_printf(m, " node %d",
			   t->buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %p\n",
		   t->buffer->data_size, t->buffer->offsets_size,
		   t->buffer->data);
}
3570
/**
 * print_binder_work() - dump one queued binder_work item
 * @m:                  output seq_file
 * @prefix:             prefix for simple (one-line) work items
 * @transaction_prefix: prefix passed through for transaction items
 * @w:                  work item to print
 *
 * Dispatches on w->type; unknown types are printed rather than skipped.
 */
static void print_binder_work(struct seq_file *m, const char *prefix,
			      const char *transaction_prefix,
			      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction(m, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}
3613
/**
 * print_binder_thread() - dump one thread's state, stack, and todo list
 * @m:            output seq_file
 * @thread:       thread to print
 * @print_always: if false, the output is rolled back (by rewinding
 *                m->count to start_pos) when the thread produced nothing
 *                beyond its header line
 */
static void print_binder_thread(struct seq_file *m,
				struct binder_thread *thread,
				int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
			thread->pid, thread->looper,
			thread->looper_need_return,
			atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	/* walk the stack, following the link that belongs to this thread */
	while (t) {
		if (t->from == thread) {
			print_binder_transaction(m,
						 "    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction(m,
						 "    incoming transaction", t);
			t = t->to_parent;
		} else {
			/* entry not linked to this thread: stop the walk */
			print_binder_transaction(m, "    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work(m, "    ", "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;	/* nothing interesting: drop header */
}
3649
/**
 * print_binder_node() - dump one node's refcounts, ref holders, and
 *                       pending async work
 * @m:    output seq_file
 * @node: node to print
 */
static void print_binder_node(struct seq_file *m, struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	/* first pass: count the refs so the total precedes the pid list */
	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	list_for_each_entry(w, &node->async_todo, entry)
		print_binder_work(m, "    ",
				  "    pending async transaction", w);
}
3675
/**
 * print_binder_ref() - dump one reference; a NULL node->proc means the
 *                      target node's process has died ("dead " marker)
 * @m:   output seq_file
 * @ref: reference to print
 */
static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
{
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %p\n",
		   ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->strong, ref->weak, ref->death);
}
3682
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003683static void print_binder_proc(struct seq_file *m,
3684 struct binder_proc *proc, int print_all)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003685{
3686 struct binder_work *w;
3687 struct rb_node *n;
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003688 size_t start_pos = m->count;
3689 size_t header_pos;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003690
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003691 seq_printf(m, "proc %d\n", proc->pid);
Martijn Coenen14db3182017-02-03 14:40:47 -08003692 seq_printf(m, "context %s\n", proc->context->name);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003693 header_pos = m->count;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003694
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003695 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3696 print_binder_thread(m, rb_entry(n, struct binder_thread,
3697 rb_node), print_all);
3698 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003699 struct binder_node *node = rb_entry(n, struct binder_node,
3700 rb_node);
3701 if (print_all || node->has_async_transaction)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003702 print_binder_node(m, node);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003703 }
3704 if (print_all) {
3705 for (n = rb_first(&proc->refs_by_desc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003706 n != NULL;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003707 n = rb_next(n))
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003708 print_binder_ref(m, rb_entry(n, struct binder_ref,
3709 rb_node_desc));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003710 }
Todd Kjos19c98722017-06-29 12:01:40 -07003711 binder_alloc_print_allocated(m, &proc->alloc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003712 list_for_each_entry(w, &proc->todo, entry)
3713 print_binder_work(m, " ", " pending transaction", w);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003714 list_for_each_entry(w, &proc->delivered_death, entry) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003715 seq_puts(m, " has delivered dead binder\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003716 break;
3717 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003718 if (!print_all && m->count == header_pos)
3719 m->count = start_pos;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003720}
3721
/*
 * Human-readable names for the BR_* return codes, indexed the same
 * way as the binder_stats.br counters (see print_binder_stats).
 */
static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};
3742
/*
 * Human-readable names for the BC_* command codes, indexed the same
 * way as the binder_stats.bc counters (see print_binder_stats).
 */
static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};
3764
/*
 * Human-readable names for the object-lifetime counters, indexed the
 * same way as binder_stats.obj_created / obj_deleted.
 */
static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};
3774
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003775static void print_binder_stats(struct seq_file *m, const char *prefix,
3776 struct binder_stats *stats)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003777{
3778 int i;
3779
3780 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003781 ARRAY_SIZE(binder_command_strings));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003782 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
Badhri Jagan Sridharan0953c792017-06-29 12:01:44 -07003783 int temp = atomic_read(&stats->bc[i]);
3784
3785 if (temp)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003786 seq_printf(m, "%s%s: %d\n", prefix,
Badhri Jagan Sridharan0953c792017-06-29 12:01:44 -07003787 binder_command_strings[i], temp);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003788 }
3789
3790 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003791 ARRAY_SIZE(binder_return_strings));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003792 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
Badhri Jagan Sridharan0953c792017-06-29 12:01:44 -07003793 int temp = atomic_read(&stats->br[i]);
3794
3795 if (temp)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003796 seq_printf(m, "%s%s: %d\n", prefix,
Badhri Jagan Sridharan0953c792017-06-29 12:01:44 -07003797 binder_return_strings[i], temp);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003798 }
3799
3800 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003801 ARRAY_SIZE(binder_objstat_strings));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003802 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003803 ARRAY_SIZE(stats->obj_deleted));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003804 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
Badhri Jagan Sridharan0953c792017-06-29 12:01:44 -07003805 int created = atomic_read(&stats->obj_created[i]);
3806 int deleted = atomic_read(&stats->obj_deleted[i]);
3807
3808 if (created || deleted)
3809 seq_printf(m, "%s%s: active %d total %d\n",
3810 prefix,
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003811 binder_objstat_strings[i],
Badhri Jagan Sridharan0953c792017-06-29 12:01:44 -07003812 created - deleted,
3813 created);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003814 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003815}
3816
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003817static void print_binder_proc_stats(struct seq_file *m,
3818 struct binder_proc *proc)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003819{
3820 struct binder_work *w;
3821 struct rb_node *n;
3822 int count, strong, weak;
3823
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003824 seq_printf(m, "proc %d\n", proc->pid);
Martijn Coenen14db3182017-02-03 14:40:47 -08003825 seq_printf(m, "context %s\n", proc->context->name);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003826 count = 0;
3827 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3828 count++;
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003829 seq_printf(m, " threads: %d\n", count);
3830 seq_printf(m, " requested threads: %d+%d/%d\n"
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003831 " ready threads %d\n"
3832 " free async space %zd\n", proc->requested_threads,
3833 proc->requested_threads_started, proc->max_threads,
Todd Kjos19c98722017-06-29 12:01:40 -07003834 proc->ready_threads,
3835 binder_alloc_get_free_async_space(&proc->alloc));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003836 count = 0;
3837 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
3838 count++;
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003839 seq_printf(m, " nodes: %d\n", count);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003840 count = 0;
3841 strong = 0;
3842 weak = 0;
3843 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
3844 struct binder_ref *ref = rb_entry(n, struct binder_ref,
3845 rb_node_desc);
3846 count++;
3847 strong += ref->strong;
3848 weak += ref->weak;
3849 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003850 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003851
Todd Kjos19c98722017-06-29 12:01:40 -07003852 count = binder_alloc_get_allocated_count(&proc->alloc);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003853 seq_printf(m, " buffers: %d\n", count);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003854
3855 count = 0;
3856 list_for_each_entry(w, &proc->todo, entry) {
3857 switch (w->type) {
3858 case BINDER_WORK_TRANSACTION:
3859 count++;
3860 break;
3861 default:
3862 break;
3863 }
3864 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003865 seq_printf(m, " pending transactions: %d\n", count);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003866
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003867 print_binder_stats(m, " ", &proc->stats);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003868}
3869
3870
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003871static int binder_state_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003872{
3873 struct binder_proc *proc;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003874 struct binder_node *node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003875
Todd Kjos1cf29cf2017-06-29 12:01:42 -07003876 binder_lock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003877
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003878 seq_puts(m, "binder state:\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003879
Todd Kjosc44b1232017-06-29 12:01:43 -07003880 spin_lock(&binder_dead_nodes_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003881 if (!hlist_empty(&binder_dead_nodes))
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003882 seq_puts(m, "dead nodes:\n");
Sasha Levinb67bfe02013-02-27 17:06:00 -08003883 hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003884 print_binder_node(m, node);
Todd Kjosc44b1232017-06-29 12:01:43 -07003885 spin_unlock(&binder_dead_nodes_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003886
Todd Kjosc44b1232017-06-29 12:01:43 -07003887 mutex_lock(&binder_procs_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -08003888 hlist_for_each_entry(proc, &binder_procs, proc_node)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003889 print_binder_proc(m, proc, 1);
Todd Kjosc44b1232017-06-29 12:01:43 -07003890 mutex_unlock(&binder_procs_lock);
Todd Kjos1cf29cf2017-06-29 12:01:42 -07003891 binder_unlock(__func__);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003892 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003893}
3894
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003895static int binder_stats_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003896{
3897 struct binder_proc *proc;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003898
Todd Kjos1cf29cf2017-06-29 12:01:42 -07003899 binder_lock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003900
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003901 seq_puts(m, "binder stats:\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003902
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003903 print_binder_stats(m, "", &binder_stats);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003904
Todd Kjosc44b1232017-06-29 12:01:43 -07003905 mutex_lock(&binder_procs_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -08003906 hlist_for_each_entry(proc, &binder_procs, proc_node)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003907 print_binder_proc_stats(m, proc);
Todd Kjosc44b1232017-06-29 12:01:43 -07003908 mutex_unlock(&binder_procs_lock);
Todd Kjos1cf29cf2017-06-29 12:01:42 -07003909 binder_unlock(__func__);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003910 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003911}
3912
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003913static int binder_transactions_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003914{
3915 struct binder_proc *proc;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003916
Todd Kjos1cf29cf2017-06-29 12:01:42 -07003917 binder_lock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003918
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003919 seq_puts(m, "binder transactions:\n");
Todd Kjosc44b1232017-06-29 12:01:43 -07003920 mutex_lock(&binder_procs_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -08003921 hlist_for_each_entry(proc, &binder_procs, proc_node)
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003922 print_binder_proc(m, proc, 0);
Todd Kjosc44b1232017-06-29 12:01:43 -07003923 mutex_unlock(&binder_procs_lock);
Todd Kjos1cf29cf2017-06-29 12:01:42 -07003924 binder_unlock(__func__);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003925 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003926}
3927
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003928static int binder_proc_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003929{
Riley Andrews83050a42016-02-09 21:05:33 -08003930 struct binder_proc *itr;
Martijn Coenen14db3182017-02-03 14:40:47 -08003931 int pid = (unsigned long)m->private;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003932
Todd Kjos1cf29cf2017-06-29 12:01:42 -07003933 binder_lock(__func__);
Riley Andrews83050a42016-02-09 21:05:33 -08003934
Todd Kjosc44b1232017-06-29 12:01:43 -07003935 mutex_lock(&binder_procs_lock);
Riley Andrews83050a42016-02-09 21:05:33 -08003936 hlist_for_each_entry(itr, &binder_procs, proc_node) {
Martijn Coenen14db3182017-02-03 14:40:47 -08003937 if (itr->pid == pid) {
3938 seq_puts(m, "binder proc state:\n");
3939 print_binder_proc(m, itr, 1);
Riley Andrews83050a42016-02-09 21:05:33 -08003940 }
3941 }
Todd Kjosc44b1232017-06-29 12:01:43 -07003942 mutex_unlock(&binder_procs_lock);
3943
Todd Kjos1cf29cf2017-06-29 12:01:42 -07003944 binder_unlock(__func__);
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003945 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003946}
3947
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003948static void print_binder_transaction_log_entry(struct seq_file *m,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003949 struct binder_transaction_log_entry *e)
3950{
Todd Kjosd99c7332017-06-29 12:01:53 -07003951 int debug_id = READ_ONCE(e->debug_id_done);
3952 /*
3953 * read barrier to guarantee debug_id_done read before
3954 * we print the log values
3955 */
3956 smp_rmb();
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003957 seq_printf(m,
Todd Kjosd99c7332017-06-29 12:01:53 -07003958 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003959 e->debug_id, (e->call_type == 2) ? "reply" :
3960 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
Martijn Coenen14db3182017-02-03 14:40:47 -08003961 e->from_thread, e->to_proc, e->to_thread, e->context_name,
Todd Kjos57ada2f2017-06-29 12:01:46 -07003962 e->to_node, e->target_handle, e->data_size, e->offsets_size,
3963 e->return_error, e->return_error_param,
3964 e->return_error_line);
Todd Kjosd99c7332017-06-29 12:01:53 -07003965 /*
3966 * read-barrier to guarantee read of debug_id_done after
3967 * done printing the fields of the entry
3968 */
3969 smp_rmb();
3970 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
3971 "\n" : " (incomplete)\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003972}
3973
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003974static int binder_transaction_log_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003975{
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003976 struct binder_transaction_log *log = m->private;
Todd Kjosd99c7332017-06-29 12:01:53 -07003977 unsigned int log_cur = atomic_read(&log->cur);
3978 unsigned int count;
3979 unsigned int cur;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003980 int i;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003981
Todd Kjosd99c7332017-06-29 12:01:53 -07003982 count = log_cur + 1;
3983 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
3984 0 : count % ARRAY_SIZE(log->entry);
3985 if (count > ARRAY_SIZE(log->entry) || log->full)
3986 count = ARRAY_SIZE(log->entry);
3987 for (i = 0; i < count; i++) {
3988 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
3989
3990 print_binder_transaction_log_entry(m, &log->entry[index]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003991 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003992 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003993}
3994
3995static const struct file_operations binder_fops = {
3996 .owner = THIS_MODULE,
3997 .poll = binder_poll,
3998 .unlocked_ioctl = binder_ioctl,
Arve Hjønnevågda498892014-02-21 14:40:26 -08003999 .compat_ioctl = binder_ioctl,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004000 .mmap = binder_mmap,
4001 .open = binder_open,
4002 .flush = binder_flush,
4003 .release = binder_release,
4004};
4005
/*
 * Expand the seq_file open()/file_operations boilerplate
 * (binder_<name>_fops) for each debugfs entry created in binder_init().
 */
BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);
4010
Martijn Coenenac4812c2017-02-03 14:40:48 -08004011static int __init init_binder_device(const char *name)
4012{
4013 int ret;
4014 struct binder_device *binder_device;
4015
4016 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
4017 if (!binder_device)
4018 return -ENOMEM;
4019
4020 binder_device->miscdev.fops = &binder_fops;
4021 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
4022 binder_device->miscdev.name = name;
4023
4024 binder_device->context.binder_context_mgr_uid = INVALID_UID;
4025 binder_device->context.name = name;
Todd Kjosc44b1232017-06-29 12:01:43 -07004026 mutex_init(&binder_device->context.context_mgr_node_lock);
Martijn Coenenac4812c2017-02-03 14:40:48 -08004027
4028 ret = misc_register(&binder_device->miscdev);
4029 if (ret < 0) {
4030 kfree(binder_device);
4031 return ret;
4032 }
4033
4034 hlist_add_head(&binder_device->hlist, &binder_devices);
4035
4036 return ret;
4037}
4038
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004039static int __init binder_init(void)
4040{
4041 int ret;
Martijn Coenenac4812c2017-02-03 14:40:48 -08004042 char *device_name, *device_names;
4043 struct binder_device *device;
4044 struct hlist_node *tmp;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004045
Todd Kjosd99c7332017-06-29 12:01:53 -07004046 atomic_set(&binder_transaction_log.cur, ~0U);
4047 atomic_set(&binder_transaction_log_failed.cur, ~0U);
4048
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07004049 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
4050 if (binder_debugfs_dir_entry_root)
4051 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
4052 binder_debugfs_dir_entry_root);
Martijn Coenenac4812c2017-02-03 14:40:48 -08004053
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07004054 if (binder_debugfs_dir_entry_root) {
4055 debugfs_create_file("state",
4056 S_IRUGO,
4057 binder_debugfs_dir_entry_root,
4058 NULL,
4059 &binder_state_fops);
4060 debugfs_create_file("stats",
4061 S_IRUGO,
4062 binder_debugfs_dir_entry_root,
4063 NULL,
4064 &binder_stats_fops);
4065 debugfs_create_file("transactions",
4066 S_IRUGO,
4067 binder_debugfs_dir_entry_root,
4068 NULL,
4069 &binder_transactions_fops);
4070 debugfs_create_file("transaction_log",
4071 S_IRUGO,
4072 binder_debugfs_dir_entry_root,
4073 &binder_transaction_log,
4074 &binder_transaction_log_fops);
4075 debugfs_create_file("failed_transaction_log",
4076 S_IRUGO,
4077 binder_debugfs_dir_entry_root,
4078 &binder_transaction_log_failed,
4079 &binder_transaction_log_fops);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004080 }
Martijn Coenenac4812c2017-02-03 14:40:48 -08004081
4082 /*
4083 * Copy the module_parameter string, because we don't want to
4084 * tokenize it in-place.
4085 */
4086 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
4087 if (!device_names) {
4088 ret = -ENOMEM;
4089 goto err_alloc_device_names_failed;
4090 }
4091 strcpy(device_names, binder_devices_param);
4092
4093 while ((device_name = strsep(&device_names, ","))) {
4094 ret = init_binder_device(device_name);
4095 if (ret)
4096 goto err_init_binder_device_failed;
4097 }
4098
4099 return ret;
4100
4101err_init_binder_device_failed:
4102 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
4103 misc_deregister(&device->miscdev);
4104 hlist_del(&device->hlist);
4105 kfree(device);
4106 }
4107err_alloc_device_names_failed:
4108 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
4109
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004110 return ret;
4111}
4112
4113device_initcall(binder_init);
4114
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07004115#define CREATE_TRACE_POINTS
4116#include "binder_trace.h"
4117
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09004118MODULE_LICENSE("GPL v2");