blob: 331d2abca9a24c0b26e85b5e39dea13bffb23690 [file] [log] [blame]
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001/* binder.c
2 *
3 * Android IPC Subsystem
4 *
5 * Copyright (C) 2007-2008 Google, Inc.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
Anmol Sarma56b468f2012-10-30 22:35:43 +053018#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090020#include <asm/cacheflush.h>
21#include <linux/fdtable.h>
22#include <linux/file.h>
Colin Crosse2610b22013-05-06 23:50:15 +000023#include <linux/freezer.h>
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090024#include <linux/fs.h>
25#include <linux/list.h>
26#include <linux/miscdevice.h>
27#include <linux/mm.h>
28#include <linux/module.h>
29#include <linux/mutex.h>
30#include <linux/nsproxy.h>
31#include <linux/poll.h>
Arve Hjønnevåg16b66552009-04-28 20:57:50 -070032#include <linux/debugfs.h>
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090033#include <linux/rbtree.h>
34#include <linux/sched.h>
Arve Hjønnevåg5249f482009-04-28 20:57:50 -070035#include <linux/seq_file.h>
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090036#include <linux/uaccess.h>
37#include <linux/vmalloc.h>
Colin Crossc11a1662010-04-15 15:21:51 -070038#include <linux/slab.h>
Eric W. Biederman17cf22c2010-03-02 14:51:53 -080039#include <linux/pid_namespace.h>
Stephen Smalley79af7302015-01-21 10:54:10 -050040#include <linux/security.h>
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090041
Greg Kroah-Hartman9246a4a2014-10-16 15:26:51 +020042#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
43#define BINDER_IPC_32BIT 1
44#endif
45
46#include <uapi/linux/android/binder.h>
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -070047#include "binder_trace.h"
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090048
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -070049static DEFINE_MUTEX(binder_main_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090050static DEFINE_MUTEX(binder_deferred_lock);
Arve Hjønnevågbd1eff92012-02-01 15:29:13 -080051static DEFINE_MUTEX(binder_mmap_lock);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090052
53static HLIST_HEAD(binder_procs);
54static HLIST_HEAD(binder_deferred_list);
55static HLIST_HEAD(binder_dead_nodes);
56
Arve Hjønnevåg16b66552009-04-28 20:57:50 -070057static struct dentry *binder_debugfs_dir_entry_root;
58static struct dentry *binder_debugfs_dir_entry_proc;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090059static struct binder_node *binder_context_mgr_node;
Eric W. Biederman4a2ebb92012-05-25 18:34:53 -060060static kuid_t binder_context_mgr_uid = INVALID_UID;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090061static int binder_last_id;
62
/*
 * BINDER_DEBUG_ENTRY(name) generates the single_open() boilerplate for a
 * read-only debugfs file backed by binder_<name>_show(): an open handler
 * plus a seq_file-based file_operations table named binder_<name>_fops.
 */
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

/* Forward declaration: the show routine is defined later in this file. */
static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +090079
80/* This is only defined in include/asm-arm/sizes.h */
81#ifndef SZ_1K
82#define SZ_1K 0x400
83#endif
84
85#ifndef SZ_4M
86#define SZ_4M 0x400000
87#endif
88
89#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
90
91#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
92
93enum {
94 BINDER_DEBUG_USER_ERROR = 1U << 0,
95 BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
96 BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
97 BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
98 BINDER_DEBUG_DEAD_BINDER = 1U << 4,
99 BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
100 BINDER_DEBUG_READ_WRITE = 1U << 6,
101 BINDER_DEBUG_USER_REFS = 1U << 7,
102 BINDER_DEBUG_THREADS = 1U << 8,
103 BINDER_DEBUG_TRANSACTION = 1U << 9,
104 BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
105 BINDER_DEBUG_FREE_BUFFER = 1U << 11,
106 BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
107 BINDER_DEBUG_BUFFER_ALLOC = 1U << 13,
108 BINDER_DEBUG_PRIORITY_CAP = 1U << 14,
109 BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15,
110};
111static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
112 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
113module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
114
Zhengwang Ruan2c523252012-03-07 10:36:57 +0800115static bool binder_debug_no_lock;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900116module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
117
118static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
119static int binder_stop_on_user_error;
120
/*
 * Module-parameter setter for "stop_on_user_error".
 *
 * Stores the new integer via param_set_int() and, when the resulting
 * value is below 2, wakes any task sleeping on binder_user_error_wait
 * (a value >= 2 means "stay stopped after a user error").
 *
 * NOTE(review): the param_set_int() result is returned but not checked
 * before reading binder_stop_on_user_error, so on a parse failure the
 * wake_up() decision is based on the old value — presumably harmless,
 * but worth confirming.
 */
static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
	param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
133
134#define binder_debug(mask, x...) \
135 do { \
136 if (binder_debug_mask & mask) \
Sherwin Soltani258767f2012-06-26 02:00:30 -0400137 pr_info(x); \
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900138 } while (0)
139
140#define binder_user_error(x...) \
141 do { \
142 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
Sherwin Soltani258767f2012-06-26 02:00:30 -0400143 pr_info(x); \
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900144 if (binder_stop_on_user_error) \
145 binder_stop_on_user_error = 2; \
146 } while (0)
147
Martijn Coenenfeba3902017-02-03 14:40:45 -0800148#define to_flat_binder_object(hdr) \
149 container_of(hdr, struct flat_binder_object, hdr)
150
151#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
152
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900153enum binder_stat_types {
154 BINDER_STAT_PROC,
155 BINDER_STAT_THREAD,
156 BINDER_STAT_NODE,
157 BINDER_STAT_REF,
158 BINDER_STAT_DEATH,
159 BINDER_STAT_TRANSACTION,
160 BINDER_STAT_TRANSACTION_COMPLETE,
161 BINDER_STAT_COUNT
162};
163
164struct binder_stats {
165 int br[_IOC_NR(BR_FAILED_REPLY) + 1];
166 int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
167 int obj_created[BINDER_STAT_COUNT];
168 int obj_deleted[BINDER_STAT_COUNT];
169};
170
171static struct binder_stats binder_stats;
172
173static inline void binder_stats_deleted(enum binder_stat_types type)
174{
175 binder_stats.obj_deleted[type]++;
176}
177
178static inline void binder_stats_created(enum binder_stat_types type)
179{
180 binder_stats.obj_created[type]++;
181}
182
183struct binder_transaction_log_entry {
184 int debug_id;
185 int call_type;
186 int from_proc;
187 int from_thread;
188 int target_handle;
189 int to_proc;
190 int to_thread;
191 int to_node;
192 int data_size;
193 int offsets_size;
194};
195struct binder_transaction_log {
196 int next;
197 int full;
198 struct binder_transaction_log_entry entry[32];
199};
200static struct binder_transaction_log binder_transaction_log;
201static struct binder_transaction_log binder_transaction_log_failed;
202
203static struct binder_transaction_log_entry *binder_transaction_log_add(
204 struct binder_transaction_log *log)
205{
206 struct binder_transaction_log_entry *e;
Seunghun Lee10f62862014-05-01 01:30:23 +0900207
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900208 e = &log->entry[log->next];
209 memset(e, 0, sizeof(*e));
210 log->next++;
211 if (log->next == ARRAY_SIZE(log->entry)) {
212 log->next = 0;
213 log->full = 1;
214 }
215 return e;
216}
217
218struct binder_work {
219 struct list_head entry;
220 enum {
221 BINDER_WORK_TRANSACTION = 1,
222 BINDER_WORK_TRANSACTION_COMPLETE,
223 BINDER_WORK_NODE,
224 BINDER_WORK_DEAD_BINDER,
225 BINDER_WORK_DEAD_BINDER_AND_CLEAR,
226 BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
227 } type;
228};
229
230struct binder_node {
231 int debug_id;
232 struct binder_work work;
233 union {
234 struct rb_node rb_node;
235 struct hlist_node dead_node;
236 };
237 struct binder_proc *proc;
238 struct hlist_head refs;
239 int internal_strong_refs;
240 int local_weak_refs;
241 int local_strong_refs;
Arve Hjønnevågda498892014-02-21 14:40:26 -0800242 binder_uintptr_t ptr;
243 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900244 unsigned has_strong_ref:1;
245 unsigned pending_strong_ref:1;
246 unsigned has_weak_ref:1;
247 unsigned pending_weak_ref:1;
248 unsigned has_async_transaction:1;
249 unsigned accept_fds:1;
250 unsigned min_priority:8;
251 struct list_head async_todo;
252};
253
254struct binder_ref_death {
255 struct binder_work work;
Arve Hjønnevågda498892014-02-21 14:40:26 -0800256 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900257};
258
259struct binder_ref {
260 /* Lookups needed: */
261 /* node + proc => ref (transaction) */
262 /* desc + proc => ref (transaction, inc/dec ref) */
263 /* node => refs + procs (proc exit) */
264 int debug_id;
265 struct rb_node rb_node_desc;
266 struct rb_node rb_node_node;
267 struct hlist_node node_entry;
268 struct binder_proc *proc;
269 struct binder_node *node;
270 uint32_t desc;
271 int strong;
272 int weak;
273 struct binder_ref_death *death;
274};
275
276struct binder_buffer {
Justin P. Mattock217218f2012-01-12 06:51:31 -0800277 struct list_head entry; /* free and allocated entries by address */
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900278 struct rb_node rb_node; /* free entry by size or allocated entry */
279 /* by address */
280 unsigned free:1;
281 unsigned allow_user_free:1;
282 unsigned async_transaction:1;
283 unsigned debug_id:29;
284
285 struct binder_transaction *transaction;
286
287 struct binder_node *target_node;
288 size_t data_size;
289 size_t offsets_size;
290 uint8_t data[0];
291};
292
293enum binder_deferred_state {
294 BINDER_DEFERRED_PUT_FILES = 0x01,
295 BINDER_DEFERRED_FLUSH = 0x02,
296 BINDER_DEFERRED_RELEASE = 0x04,
297};
298
299struct binder_proc {
300 struct hlist_node proc_node;
301 struct rb_root threads;
302 struct rb_root nodes;
303 struct rb_root refs_by_desc;
304 struct rb_root refs_by_node;
305 int pid;
306 struct vm_area_struct *vma;
Arve Hjønnevåg2a909572012-03-08 15:43:36 -0800307 struct mm_struct *vma_vm_mm;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900308 struct task_struct *tsk;
309 struct files_struct *files;
310 struct hlist_node deferred_work_node;
311 int deferred_work;
312 void *buffer;
313 ptrdiff_t user_buffer_offset;
314
315 struct list_head buffers;
316 struct rb_root free_buffers;
317 struct rb_root allocated_buffers;
318 size_t free_async_space;
319
320 struct page **pages;
321 size_t buffer_size;
322 uint32_t buffer_free;
323 struct list_head todo;
324 wait_queue_head_t wait;
325 struct binder_stats stats;
326 struct list_head delivered_death;
327 int max_threads;
328 int requested_threads;
329 int requested_threads_started;
330 int ready_threads;
331 long default_priority;
Arve Hjønnevåg16b66552009-04-28 20:57:50 -0700332 struct dentry *debugfs_entry;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900333};
334
335enum {
336 BINDER_LOOPER_STATE_REGISTERED = 0x01,
337 BINDER_LOOPER_STATE_ENTERED = 0x02,
338 BINDER_LOOPER_STATE_EXITED = 0x04,
339 BINDER_LOOPER_STATE_INVALID = 0x08,
340 BINDER_LOOPER_STATE_WAITING = 0x10,
341 BINDER_LOOPER_STATE_NEED_RETURN = 0x20
342};
343
344struct binder_thread {
345 struct binder_proc *proc;
346 struct rb_node rb_node;
347 int pid;
348 int looper;
349 struct binder_transaction *transaction_stack;
350 struct list_head todo;
351 uint32_t return_error; /* Write failed, return error code in read buf */
352 uint32_t return_error2; /* Write failed, return error code in read */
353 /* buffer. Used when sending a reply to a dead process that */
354 /* we are also waiting on */
355 wait_queue_head_t wait;
356 struct binder_stats stats;
357};
358
359struct binder_transaction {
360 int debug_id;
361 struct binder_work work;
362 struct binder_thread *from;
363 struct binder_transaction *from_parent;
364 struct binder_proc *to_proc;
365 struct binder_thread *to_thread;
366 struct binder_transaction *to_parent;
367 unsigned need_reply:1;
368 /* unsigned is_dead:1; */ /* not used at the moment */
369
370 struct binder_buffer *buffer;
371 unsigned int code;
372 unsigned int flags;
373 long priority;
374 long saved_priority;
Eric W. Biederman4a2ebb92012-05-25 18:34:53 -0600375 kuid_t sender_euid;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900376};
377
378static void
379binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
380
/*
 * Reserve an unused file descriptor in @proc's file table.
 *
 * Samples the target task's RLIMIT_NOFILE under its sighand lock so the
 * limit is read consistently, then asks __alloc_fd() for a slot in
 * [0, rlim_cur) honoring @flags (e.g. O_CLOEXEC).
 *
 * Returns the new fd, -ESRCH when the proc has no files_struct, or
 * -EMFILE when the task is exiting (sighand no longer lockable).
 */
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;

	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	return __alloc_fd(files, 0, rlim_cur, flags);
}
398
399/*
400 * copied from fd_install
401 */
402static void task_fd_install(
403 struct binder_proc *proc, unsigned int fd, struct file *file)
404{
Al Virof869e8a2012-08-15 21:06:33 -0400405 if (proc->files)
406 __fd_install(proc->files, fd, file);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900407}
408
409/*
410 * copied from sys_close
411 */
412static long task_close_fd(struct binder_proc *proc, unsigned int fd)
413{
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900414 int retval;
415
Al Viro483ce1d2012-08-19 12:04:24 -0400416 if (proc->files == NULL)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900417 return -ESRCH;
418
Al Viro483ce1d2012-08-19 12:04:24 -0400419 retval = __close_fd(proc->files, fd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900420 /* can't restart close syscall because file table entry was cleared */
421 if (unlikely(retval == -ERESTARTSYS ||
422 retval == -ERESTARTNOINTR ||
423 retval == -ERESTARTNOHAND ||
424 retval == -ERESTART_RESTARTBLOCK))
425 retval = -EINTR;
426
427 return retval;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900428}
429
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -0700430static inline void binder_lock(const char *tag)
431{
432 trace_binder_lock(tag);
433 mutex_lock(&binder_main_lock);
434 trace_binder_locked(tag);
435}
436
437static inline void binder_unlock(const char *tag)
438{
439 trace_binder_unlock(tag);
440 mutex_unlock(&binder_main_lock);
441}
442
/*
 * Set the current task's nice value, capped by RLIMIT_NICE.
 *
 * If @nice is permitted outright it is applied directly. Otherwise the
 * most generous value the rlimit allows is used instead, the cap is
 * logged at BINDER_DEBUG_PRIORITY_CAP level, and a user error is
 * reported when even that fallback exceeds MAX_NICE (i.e. RLIMIT_NICE
 * was never configured for the task).
 */
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
460
461static size_t binder_buffer_size(struct binder_proc *proc,
462 struct binder_buffer *buffer)
463{
464 if (list_is_last(&buffer->entry, &proc->buffers))
465 return proc->buffer + proc->buffer_size - (void *)buffer->data;
Karthik Nayak78733112014-06-21 20:23:16 +0530466 return (size_t)list_entry(buffer->entry.next,
467 struct binder_buffer, entry) - (size_t)buffer->data;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900468}
469
470static void binder_insert_free_buffer(struct binder_proc *proc,
471 struct binder_buffer *new_buffer)
472{
473 struct rb_node **p = &proc->free_buffers.rb_node;
474 struct rb_node *parent = NULL;
475 struct binder_buffer *buffer;
476 size_t buffer_size;
477 size_t new_buffer_size;
478
479 BUG_ON(!new_buffer->free);
480
481 new_buffer_size = binder_buffer_size(proc, new_buffer);
482
483 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
Anmol Sarma56b468f2012-10-30 22:35:43 +0530484 "%d: add free buffer, size %zd, at %p\n",
485 proc->pid, new_buffer_size, new_buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900486
487 while (*p) {
488 parent = *p;
489 buffer = rb_entry(parent, struct binder_buffer, rb_node);
490 BUG_ON(!buffer->free);
491
492 buffer_size = binder_buffer_size(proc, buffer);
493
494 if (new_buffer_size < buffer_size)
495 p = &parent->rb_left;
496 else
497 p = &parent->rb_right;
498 }
499 rb_link_node(&new_buffer->rb_node, parent, p);
500 rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
501}
502
503static void binder_insert_allocated_buffer(struct binder_proc *proc,
504 struct binder_buffer *new_buffer)
505{
506 struct rb_node **p = &proc->allocated_buffers.rb_node;
507 struct rb_node *parent = NULL;
508 struct binder_buffer *buffer;
509
510 BUG_ON(new_buffer->free);
511
512 while (*p) {
513 parent = *p;
514 buffer = rb_entry(parent, struct binder_buffer, rb_node);
515 BUG_ON(buffer->free);
516
517 if (new_buffer < buffer)
518 p = &parent->rb_left;
519 else if (new_buffer > buffer)
520 p = &parent->rb_right;
521 else
522 BUG();
523 }
524 rb_link_node(&new_buffer->rb_node, parent, p);
525 rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
526}
527
528static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
Arve Hjønnevågda498892014-02-21 14:40:26 -0800529 uintptr_t user_ptr)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900530{
531 struct rb_node *n = proc->allocated_buffers.rb_node;
532 struct binder_buffer *buffer;
533 struct binder_buffer *kern_ptr;
534
Arve Hjønnevågda498892014-02-21 14:40:26 -0800535 kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
536 - offsetof(struct binder_buffer, data));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900537
538 while (n) {
539 buffer = rb_entry(n, struct binder_buffer, rb_node);
540 BUG_ON(buffer->free);
541
542 if (kern_ptr < buffer)
543 n = n->rb_left;
544 else if (kern_ptr > buffer)
545 n = n->rb_right;
546 else
547 return buffer;
548 }
549 return NULL;
550}
551
/*
 * Allocate (@allocate != 0) or free (@allocate == 0) the physical pages
 * backing [start, end) of the binder mapping, keeping the kernel
 * vmalloc range and the userspace vma in sync.
 *
 * When @vma is NULL the proc's saved vma is used instead, which
 * requires taking mmap_sem on the task's mm; the mm is cross-checked
 * against proc->vma_vm_mm to reject a stale vma from a different mm.
 *
 * On an allocation failure the error labels deliberately fall through
 * inside the free loop so the partially allocated range is torn down
 * one page at a time. Returns 0 on success, -ENOMEM on failure.
 */
static int binder_update_page_range(struct binder_proc *proc, int allocate,
				    void *start, void *end,
				    struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct page **page;
	struct mm_struct *mm;

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: %s pages %p-%p\n", proc->pid,
		     allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(proc, allocate, start, end);

	/* caller-supplied vma implies mmap_sem is already held */
	if (vma)
		mm = NULL;
	else
		mm = get_task_mm(proc->tsk);

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = proc->vma;
		if (vma && mm != proc->vma_vm_mm) {
			pr_err("%d: vma mm and task mm mismatch\n",
				proc->pid);
			vma = NULL;
		}
	}

	if (allocate == 0)
		goto free_range;

	if (vma == NULL) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
			proc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;

		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];

		BUG_ON(*page);
		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
		if (*page == NULL) {
			pr_err("%d: binder_alloc_buf failed for page at %p\n",
				proc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		/* map the page into the kernel side of the buffer */
		ret = map_kernel_range_noflush((unsigned long)page_addr,
					PAGE_SIZE, PAGE_KERNEL, page);
		flush_cache_vmap((unsigned long)page_addr,
				(unsigned long)page_addr + PAGE_SIZE);
		if (ret != 1) {
			pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
			       proc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		/* and into the userspace side at the fixed offset */
		user_page_addr =
			(uintptr_t)page_addr + proc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0]);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       proc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	/* walk backwards; error labels below fall through per iteration */
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
		if (vma)
			zap_page_range(vma, (uintptr_t)page_addr +
				proc->user_buffer_offset, PAGE_SIZE, NULL);
err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(*page);
		*page = NULL;
err_alloc_page_failed:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return -ENOMEM;
}
653
/*
 * Carve a transaction buffer out of @proc's mmap'ed region.
 *
 * The required size is @data_size plus @offsets_size, each rounded up
 * to pointer alignment (the sum is overflow-checked). A best-fit
 * search of the free-buffers rbtree picks the smallest free buffer
 * that can hold it; only the pages the buffer actually touches are
 * populated via binder_update_page_range(). If the chosen buffer
 * leaves enough room (more than sizeof(struct binder_buffer) + 4
 * bytes), the remainder is split off as a new free buffer.
 *
 * Async transactions are additionally charged against the proc's
 * free_async_space quota. Returns the buffer, or NULL on any failure
 * (no vma, invalid size, quota exhausted, no fit, or page allocation
 * failure).
 */
static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
					      size_t data_size,
					      size_t offsets_size, int is_async)
{
	struct rb_node *n = proc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size;

	if (proc->vma == NULL) {
		pr_err("%d: binder_alloc_buf, no vma\n",
		       proc->pid);
		return NULL;
	}

	size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	/* detect overflow of the aligned sum */
	if (size < data_size || size < offsets_size) {
		binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
				proc->pid, data_size, offsets_size);
		return NULL;
	}

	if (is_async &&
	    proc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
			      proc->pid, size);
		return NULL;
	}

	/* best-fit walk: track smallest buffer that is >= size */
	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_buffer_size(proc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
			proc->pid, size);
		return NULL;
	}
	/* n == NULL means no exact match; recompute for the best fit */
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_buffer_size(proc, buffer);
	}

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
		      proc->pid, size, buffer, buffer_size);

	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	if (n == NULL) {
		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
			buffer_size = size; /* no room for other buffers */
		else
			buffer_size = size + sizeof(struct binder_buffer);
	}
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	if (binder_update_page_range(proc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
		return NULL;

	rb_erase(best_fit, &proc->free_buffers);
	buffer->free = 0;
	binder_insert_allocated_buffer(proc, buffer);
	/* split off the unused tail as a new free buffer */
	if (buffer_size != size) {
		struct binder_buffer *new_buffer = (void *)buffer->data + size;

		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(proc, new_buffer);
	}
	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got %p\n",
		      proc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	if (is_async) {
		proc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_alloc_buf size %zd async free %zd\n",
			      proc->pid, size, proc->free_async_space);
	}

	return buffer;
}
759
760static void *buffer_start_page(struct binder_buffer *buffer)
761{
762 return (void *)((uintptr_t)buffer & PAGE_MASK);
763}
764
765static void *buffer_end_page(struct binder_buffer *buffer)
766{
767 return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
768}
769
770static void binder_delete_free_buffer(struct binder_proc *proc,
771 struct binder_buffer *buffer)
772{
773 struct binder_buffer *prev, *next = NULL;
774 int free_page_end = 1;
775 int free_page_start = 1;
776
777 BUG_ON(proc->buffers.next == &buffer->entry);
778 prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
779 BUG_ON(!prev->free);
780 if (buffer_end_page(prev) == buffer_start_page(buffer)) {
781 free_page_start = 0;
782 if (buffer_end_page(prev) == buffer_end_page(buffer))
783 free_page_end = 0;
784 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
Anmol Sarma56b468f2012-10-30 22:35:43 +0530785 "%d: merge free, buffer %p share page with %p\n",
786 proc->pid, buffer, prev);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900787 }
788
789 if (!list_is_last(&buffer->entry, &proc->buffers)) {
790 next = list_entry(buffer->entry.next,
791 struct binder_buffer, entry);
792 if (buffer_start_page(next) == buffer_end_page(buffer)) {
793 free_page_end = 0;
794 if (buffer_start_page(next) ==
795 buffer_start_page(buffer))
796 free_page_start = 0;
797 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
Anmol Sarma56b468f2012-10-30 22:35:43 +0530798 "%d: merge free, buffer %p share page with %p\n",
799 proc->pid, buffer, prev);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900800 }
801 }
802 list_del(&buffer->entry);
803 if (free_page_start || free_page_end) {
804 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
Masanari Iida1dcdbfd2013-06-23 23:47:15 +0900805 "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900806 proc->pid, buffer, free_page_start ? "" : " end",
807 free_page_end ? "" : " start", prev, next);
808 binder_update_page_range(proc, 0, free_page_start ?
809 buffer_start_page(buffer) : buffer_end_page(buffer),
810 (free_page_end ? buffer_end_page(buffer) :
811 buffer_start_page(buffer)) + PAGE_SIZE, NULL);
812 }
813}
814
/*
 * Return @buffer to @proc's free pool.
 *
 * Releases the fully covered backing pages, moves the buffer from the
 * allocated rbtree to the free rbtree, and coalesces with a free next
 * and/or previous neighbor in the address-ordered list so adjacent
 * free space forms one buffer. Async buffers also give their space
 * back to the free_async_space quota.
 */
static void binder_free_buf(struct binder_proc *proc,
			    struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_buffer_size(proc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *));

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_free_buf %p size %zd buffer_size %zd\n",
		      proc->pid, buffer, size, buffer_size);

	/* sanity: must be a live allocated buffer inside the mapping */
	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON((void *)buffer < proc->buffer);
	BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);

	if (buffer->async_transaction) {
		proc->free_async_space += size + sizeof(struct binder_buffer);

		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_free_buf size %zd async free %zd\n",
			      proc->pid, size, proc->free_async_space);
	}

	/* drop only the pages wholly inside the data area */
	binder_update_page_range(proc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);
	rb_erase(&buffer->rb_node, &proc->allocated_buffers);
	buffer->free = 1;
	/* coalesce with a free successor */
	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		struct binder_buffer *next = list_entry(buffer->entry.next,
						struct binder_buffer, entry);

		if (next->free) {
			rb_erase(&next->rb_node, &proc->free_buffers);
			binder_delete_free_buffer(proc, next);
		}
	}
	/* coalesce with a free predecessor (prev absorbs this buffer) */
	if (proc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = list_entry(buffer->entry.prev,
						struct binder_buffer, entry);

		if (prev->free) {
			binder_delete_free_buffer(proc, buffer);
			rb_erase(&prev->rb_node, &proc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(proc, buffer);
}
870
871static struct binder_node *binder_get_node(struct binder_proc *proc,
Arve Hjønnevågda498892014-02-21 14:40:26 -0800872 binder_uintptr_t ptr)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900873{
874 struct rb_node *n = proc->nodes.rb_node;
875 struct binder_node *node;
876
877 while (n) {
878 node = rb_entry(n, struct binder_node, rb_node);
879
880 if (ptr < node->ptr)
881 n = n->rb_left;
882 else if (ptr > node->ptr)
883 n = n->rb_right;
884 else
885 return node;
886 }
887 return NULL;
888}
889
890static struct binder_node *binder_new_node(struct binder_proc *proc,
Arve Hjønnevågda498892014-02-21 14:40:26 -0800891 binder_uintptr_t ptr,
892 binder_uintptr_t cookie)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900893{
894 struct rb_node **p = &proc->nodes.rb_node;
895 struct rb_node *parent = NULL;
896 struct binder_node *node;
897
898 while (*p) {
899 parent = *p;
900 node = rb_entry(parent, struct binder_node, rb_node);
901
902 if (ptr < node->ptr)
903 p = &(*p)->rb_left;
904 else if (ptr > node->ptr)
905 p = &(*p)->rb_right;
906 else
907 return NULL;
908 }
909
910 node = kzalloc(sizeof(*node), GFP_KERNEL);
911 if (node == NULL)
912 return NULL;
913 binder_stats_created(BINDER_STAT_NODE);
914 rb_link_node(&node->rb_node, parent, p);
915 rb_insert_color(&node->rb_node, &proc->nodes);
916 node->debug_id = ++binder_last_id;
917 node->proc = proc;
918 node->ptr = ptr;
919 node->cookie = cookie;
920 node->work.type = BINDER_WORK_NODE;
921 INIT_LIST_HEAD(&node->work.entry);
922 INIT_LIST_HEAD(&node->async_todo);
923 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Arve Hjønnevågda498892014-02-21 14:40:26 -0800924 "%d:%d node %d u%016llx c%016llx created\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900925 proc->pid, current->pid, node->debug_id,
Arve Hjønnevågda498892014-02-21 14:40:26 -0800926 (u64)node->ptr, (u64)node->cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +0900927 return node;
928}
929
/*
 * binder_inc_node() - increment a reference count on @node.
 * @strong:	nonzero to take a strong reference, zero for a weak one.
 * @internal:	nonzero when the reference is held on behalf of a remote
 *		binder_ref, zero for a driver-local reference.
 * @target_list: work list onto which the node's work item is queued when
 *		userspace must be told about the new strong/weak state;
 *		may be NULL when no notification is required.
 *
 * Returns 0 on success, -EINVAL on an invalid increment (a strong
 * internal ref with no target list on a node that has no strong refs
 * yet, or a weak ref needing notification with no target list).
 */
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	if (strong) {
		if (internal) {
			/*
			 * First internal strong ref must be delivered to
			 * userspace; only the context manager node with
			 * has_strong_ref already set is exempt.
			 */
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node == binder_context_mgr_node &&
			    node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		/*
		 * Userspace has not yet acknowledged a strong ref:
		 * (re)queue the node work on the requested list.
		 */
		if (!node->has_strong_ref && target_list) {
			list_del_init(&node->work.entry);
			list_add_tail(&node->work.entry, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		/* Queue work only if not already pending somewhere. */
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			list_add_tail(&node->work.entry, target_list);
		}
	}
	return 0;
}
964
/*
 * binder_dec_node() - drop a reference on @node; free it when unused.
 * @strong:	nonzero to drop a strong reference, zero for a weak one.
 * @internal:	nonzero for a reference held by a remote binder_ref.
 *
 * When the last reference of the relevant kind goes away, either queues
 * node work so userspace can drop its handle (live proc that still has
 * has_strong_ref/has_weak_ref set), or deletes the node outright once
 * nothing at all references it.  Always returns 0.
 */
static int binder_dec_node(struct binder_node *node, int strong, int internal)
{
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		/* Still strongly referenced: nothing more to do. */
		if (node->local_strong_refs || node->internal_strong_refs)
			return 0;
	} else {
		if (!internal)
			node->local_weak_refs--;
		/* Still weakly referenced (locally or via refs list). */
		if (node->local_weak_refs || !hlist_empty(&node->refs))
			return 0;
	}
	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
		/*
		 * Userspace still holds a strong/weak handle: queue node
		 * work on the owning proc so it can be told to release it.
		 */
		if (list_empty(&node->work.entry)) {
			list_add_tail(&node->work.entry, &node->proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		/* Free the node once no refs of any kind remain. */
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs) {
			list_del_init(&node->work.entry);
			if (node->proc) {
				rb_erase(&node->rb_node, &node->proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				/* Node's proc died earlier: off dead list. */
				hlist_del(&node->dead_node);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			kfree(node);
			binder_stats_deleted(BINDER_STAT_NODE);
		}
	}

	return 0;
}
1007
1008
1009static struct binder_ref *binder_get_ref(struct binder_proc *proc,
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001010 u32 desc, bool need_strong_ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001011{
1012 struct rb_node *n = proc->refs_by_desc.rb_node;
1013 struct binder_ref *ref;
1014
1015 while (n) {
1016 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1017
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001018 if (desc < ref->desc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001019 n = n->rb_left;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001020 } else if (desc > ref->desc) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001021 n = n->rb_right;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001022 } else if (need_strong_ref && !ref->strong) {
1023 binder_user_error("tried to use weak ref as strong ref\n");
1024 return NULL;
1025 } else {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001026 return ref;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001027 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001028 }
1029 return NULL;
1030}
1031
/*
 * binder_get_ref_for_node() - find or create @proc's ref to @node.
 *
 * Looks @node up in @proc->refs_by_node; if absent, allocates a new
 * binder_ref, assigns it the lowest free descriptor (0 is reserved for
 * the context manager node), and links it into both rb-trees and the
 * node's refs list.  Returns the ref, or NULL on allocation failure.
 */
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
						  struct binder_node *node)
{
	struct rb_node *n;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref, *new_ref;

	/* Fast path: an existing ref to this node is reused. */
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (new_ref == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_REF);
	new_ref->debug_id = ++binder_last_id;
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	/*
	 * Pick the smallest unused descriptor: walk refs_by_desc in
	 * ascending order and stop at the first gap.  Descriptor 0 is
	 * only ever handed out for the context manager node.
	 */
	new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->desc > new_ref->desc)
			break;
		new_ref->desc = ref->desc + 1;
	}

	/* Insert into the descriptor-keyed tree; desc must be unique. */
	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->desc < ref->desc)
			p = &(*p)->rb_left;
		else if (new_ref->desc > ref->desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
	if (node) {
		/* Let the node know about this ref for death handling. */
		hlist_add_head(&new_ref->node_entry, &node->refs);

		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for node %d\n",
			     proc->pid, new_ref->debug_id, new_ref->desc,
			     node->debug_id);
	} else {
		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for dead node\n",
			     proc->pid, new_ref->debug_id, new_ref->desc);
	}
	return new_ref;
}
1097
/*
 * binder_delete_ref() - unlink and free @ref.
 *
 * Removes @ref from both of its proc's rb-trees and from its node's
 * refs list, dropping the node references it held (a strong one if
 * ref->strong is still set, plus the implicit weak one).  Any pending
 * death notification is discarded as well.
 */
static void binder_delete_ref(struct binder_ref *ref)
{
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->debug_id, ref->desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
	/* Drop the strong node ref first, then the weak one. */
	if (ref->strong)
		binder_dec_node(ref->node, 1, 1);
	hlist_del(&ref->node_entry);
	binder_dec_node(ref->node, 0, 1);
	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->debug_id, ref->desc);
		list_del(&ref->death->work.entry);
		kfree(ref->death);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	kfree(ref);
	binder_stats_deleted(BINDER_STAT_REF);
}
1122
1123static int binder_inc_ref(struct binder_ref *ref, int strong,
1124 struct list_head *target_list)
1125{
1126 int ret;
Seunghun Lee10f62862014-05-01 01:30:23 +09001127
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001128 if (strong) {
1129 if (ref->strong == 0) {
1130 ret = binder_inc_node(ref->node, 1, 1, target_list);
1131 if (ret)
1132 return ret;
1133 }
1134 ref->strong++;
1135 } else {
1136 if (ref->weak == 0) {
1137 ret = binder_inc_node(ref->node, 0, 1, target_list);
1138 if (ret)
1139 return ret;
1140 }
1141 ref->weak++;
1142 }
1143 return 0;
1144}
1145
1146
/*
 * binder_dec_ref() - drop a strong or weak reference through @ref.
 *
 * A 1 -> 0 strong transition also drops the strong node reference.
 * When both counts reach zero the ref deletes itself via
 * binder_delete_ref() — @ref must not be used after that.
 * Returns 0 on success, -EINVAL on an underflow attempt.
 */
static int binder_dec_ref(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->strong--;
		/* Last strong ref: release the strong node reference. */
		if (ref->strong == 0) {
			int ret;

			ret = binder_dec_node(ref->node, strong, 1);
			if (ret)
				return ret;
		}
	} else {
		if (ref->weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->weak--;
	}
	/* Nothing holds this ref any more: free it (frees @ref!). */
	if (ref->strong == 0 && ref->weak == 0)
		binder_delete_ref(ref);
	return 0;
}
1177
1178static void binder_pop_transaction(struct binder_thread *target_thread,
1179 struct binder_transaction *t)
1180{
1181 if (target_thread) {
1182 BUG_ON(target_thread->transaction_stack != t);
1183 BUG_ON(target_thread->transaction_stack->from != target_thread);
1184 target_thread->transaction_stack =
1185 target_thread->transaction_stack->from_parent;
1186 t->from = NULL;
1187 }
1188 t->need_reply = 0;
1189 if (t->buffer)
1190 t->buffer->transaction = NULL;
1191 kfree(t);
1192 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1193}
1194
/*
 * binder_send_failed_reply() - deliver @error_code to the originator
 * of transaction @t.
 *
 * Walks up the from_parent chain: the first transaction whose sender
 * thread still exists gets the error (stashed in return_error, with an
 * existing pending error shifted to return_error2) and is woken; each
 * transaction with a dead sender is popped and the walk retries on its
 * parent.  Only valid for non-one-way transactions.
 */
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = t->from;
		if (target_thread) {
			/* Preserve an already-pending error in slot 2. */
			if (target_thread->return_error != BR_OK &&
			    target_thread->return_error2 == BR_OK) {
				target_thread->return_error2 =
					target_thread->return_error;
				target_thread->return_error = BR_OK;
			}
			if (target_thread->return_error == BR_OK) {
				binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
					     "send failed reply for transaction %d to %d:%d\n",
					     t->debug_id,
					     target_thread->proc->pid,
					     target_thread->pid);

				binder_pop_transaction(target_thread, t);
				target_thread->return_error = error_code;
				wake_up_interruptible(&target_thread->wait);
			} else {
				/* Both error slots occupied: drop it. */
				pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
					target_thread->proc->pid,
					target_thread->pid,
					target_thread->return_error);
			}
			return;
		}
		/* Sender thread is gone: pop @t and retry on its parent. */
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_pop_transaction(target_thread, t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}
1247
Martijn Coenenfeba3902017-02-03 14:40:45 -08001248/**
1249 * binder_validate_object() - checks for a valid metadata object in a buffer.
1250 * @buffer: binder_buffer that we're parsing.
1251 * @offset: offset in the buffer at which to validate an object.
1252 *
1253 * Return: If there's a valid metadata object at @offset in @buffer, the
1254 * size of that object. Otherwise, it returns zero.
1255 */
1256static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
1257{
1258 /* Check if we can read a header first */
1259 struct binder_object_header *hdr;
1260 size_t object_size = 0;
1261
1262 if (offset > buffer->data_size - sizeof(*hdr) ||
1263 buffer->data_size < sizeof(*hdr) ||
1264 !IS_ALIGNED(offset, sizeof(u32)))
1265 return 0;
1266
1267 /* Ok, now see if we can read a complete object. */
1268 hdr = (struct binder_object_header *)(buffer->data + offset);
1269 switch (hdr->type) {
1270 case BINDER_TYPE_BINDER:
1271 case BINDER_TYPE_WEAK_BINDER:
1272 case BINDER_TYPE_HANDLE:
1273 case BINDER_TYPE_WEAK_HANDLE:
1274 object_size = sizeof(struct flat_binder_object);
1275 break;
1276 case BINDER_TYPE_FD:
1277 object_size = sizeof(struct binder_fd_object);
1278 break;
1279 default:
1280 return 0;
1281 }
1282 if (offset <= buffer->data_size - object_size &&
1283 buffer->data_size >= object_size)
1284 return object_size;
1285 else
1286 return 0;
1287}
1288
/*
 * binder_transaction_buffer_release() - release references held by the
 * objects embedded in a transaction buffer.
 * @failed_at:	when non-NULL, only objects strictly before this offset
 *		entry are released (used to unwind a partially-processed
 *		transaction); when NULL the whole offsets array is walked.
 *		Note: fds are only closed on the unwind (@failed_at) path.
 */
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t *failed_at)
{
	binder_size_t *offp, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	/* Offsets array sits after the (pointer-aligned) data area. */
	offp = (binder_size_t *)(buffer->data +
				 ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)offp + buffer->offsets_size;
	for (; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(buffer, *offp);

		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)*offp, buffer->data_size);
			continue;
		}
		hdr = (struct binder_object_header *)(buffer->data + *offp);
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			/* Drop strong for BINDER, weak for WEAK_BINDER. */
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref *ref;

			fp = to_flat_binder_object(hdr);
			ref = binder_get_ref(proc, fp->handle,
					     hdr->type == BINDER_TYPE_HANDLE);
			if (ref == NULL) {
				pr_err("transaction release %d bad handle %d\n",
				       debug_id, fp->handle);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d (node %d)\n",
				     ref->debug_id, ref->desc, ref->node->debug_id);
			binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE);
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);

			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %d\n", fp->fd);
			/* fds were installed only on the failure path. */
			if (failed_at)
				task_close_fd(proc, fp->fd);
		} break;

		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}
1374
1375static void binder_transaction(struct binder_proc *proc,
1376 struct binder_thread *thread,
1377 struct binder_transaction_data *tr, int reply)
1378{
1379 struct binder_transaction *t;
1380 struct binder_work *tcomplete;
Arve Hjønnevågda498892014-02-21 14:40:26 -08001381 binder_size_t *offp, *off_end;
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08001382 binder_size_t off_min;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001383 struct binder_proc *target_proc;
1384 struct binder_thread *target_thread = NULL;
1385 struct binder_node *target_node = NULL;
1386 struct list_head *target_list;
1387 wait_queue_head_t *target_wait;
1388 struct binder_transaction *in_reply_to = NULL;
1389 struct binder_transaction_log_entry *e;
1390 uint32_t return_error;
1391
1392 e = binder_transaction_log_add(&binder_transaction_log);
1393 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
1394 e->from_proc = proc->pid;
1395 e->from_thread = thread->pid;
1396 e->target_handle = tr->target.handle;
1397 e->data_size = tr->data_size;
1398 e->offsets_size = tr->offsets_size;
1399
1400 if (reply) {
1401 in_reply_to = thread->transaction_stack;
1402 if (in_reply_to == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301403 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001404 proc->pid, thread->pid);
1405 return_error = BR_FAILED_REPLY;
1406 goto err_empty_call_stack;
1407 }
1408 binder_set_nice(in_reply_to->saved_priority);
1409 if (in_reply_to->to_thread != thread) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301410 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001411 proc->pid, thread->pid, in_reply_to->debug_id,
1412 in_reply_to->to_proc ?
1413 in_reply_to->to_proc->pid : 0,
1414 in_reply_to->to_thread ?
1415 in_reply_to->to_thread->pid : 0);
1416 return_error = BR_FAILED_REPLY;
1417 in_reply_to = NULL;
1418 goto err_bad_call_stack;
1419 }
1420 thread->transaction_stack = in_reply_to->to_parent;
1421 target_thread = in_reply_to->from;
1422 if (target_thread == NULL) {
1423 return_error = BR_DEAD_REPLY;
1424 goto err_dead_binder;
1425 }
1426 if (target_thread->transaction_stack != in_reply_to) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301427 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001428 proc->pid, thread->pid,
1429 target_thread->transaction_stack ?
1430 target_thread->transaction_stack->debug_id : 0,
1431 in_reply_to->debug_id);
1432 return_error = BR_FAILED_REPLY;
1433 in_reply_to = NULL;
1434 target_thread = NULL;
1435 goto err_dead_binder;
1436 }
1437 target_proc = target_thread->proc;
1438 } else {
1439 if (tr->target.handle) {
1440 struct binder_ref *ref;
Seunghun Lee10f62862014-05-01 01:30:23 +09001441
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001442 ref = binder_get_ref(proc, tr->target.handle, true);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001443 if (ref == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301444 binder_user_error("%d:%d got transaction to invalid handle\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001445 proc->pid, thread->pid);
1446 return_error = BR_FAILED_REPLY;
1447 goto err_invalid_target_handle;
1448 }
1449 target_node = ref->node;
1450 } else {
1451 target_node = binder_context_mgr_node;
1452 if (target_node == NULL) {
1453 return_error = BR_DEAD_REPLY;
1454 goto err_no_context_mgr_node;
1455 }
1456 }
1457 e->to_node = target_node->debug_id;
1458 target_proc = target_node->proc;
1459 if (target_proc == NULL) {
1460 return_error = BR_DEAD_REPLY;
1461 goto err_dead_binder;
1462 }
Stephen Smalley79af7302015-01-21 10:54:10 -05001463 if (security_binder_transaction(proc->tsk,
1464 target_proc->tsk) < 0) {
1465 return_error = BR_FAILED_REPLY;
1466 goto err_invalid_target_handle;
1467 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001468 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
1469 struct binder_transaction *tmp;
Seunghun Lee10f62862014-05-01 01:30:23 +09001470
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001471 tmp = thread->transaction_stack;
1472 if (tmp->to_thread != thread) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301473 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001474 proc->pid, thread->pid, tmp->debug_id,
1475 tmp->to_proc ? tmp->to_proc->pid : 0,
1476 tmp->to_thread ?
1477 tmp->to_thread->pid : 0);
1478 return_error = BR_FAILED_REPLY;
1479 goto err_bad_call_stack;
1480 }
1481 while (tmp) {
1482 if (tmp->from && tmp->from->proc == target_proc)
1483 target_thread = tmp->from;
1484 tmp = tmp->from_parent;
1485 }
1486 }
1487 }
1488 if (target_thread) {
1489 e->to_thread = target_thread->pid;
1490 target_list = &target_thread->todo;
1491 target_wait = &target_thread->wait;
1492 } else {
1493 target_list = &target_proc->todo;
1494 target_wait = &target_proc->wait;
1495 }
1496 e->to_proc = target_proc->pid;
1497
1498 /* TODO: reuse incoming transaction for reply */
1499 t = kzalloc(sizeof(*t), GFP_KERNEL);
1500 if (t == NULL) {
1501 return_error = BR_FAILED_REPLY;
1502 goto err_alloc_t_failed;
1503 }
1504 binder_stats_created(BINDER_STAT_TRANSACTION);
1505
1506 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
1507 if (tcomplete == NULL) {
1508 return_error = BR_FAILED_REPLY;
1509 goto err_alloc_tcomplete_failed;
1510 }
1511 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
1512
1513 t->debug_id = ++binder_last_id;
1514 e->debug_id = t->debug_id;
1515
1516 if (reply)
1517 binder_debug(BINDER_DEBUG_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001518 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001519 proc->pid, thread->pid, t->debug_id,
1520 target_proc->pid, target_thread->pid,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001521 (u64)tr->data.ptr.buffer,
1522 (u64)tr->data.ptr.offsets,
1523 (u64)tr->data_size, (u64)tr->offsets_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001524 else
1525 binder_debug(BINDER_DEBUG_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001526 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001527 proc->pid, thread->pid, t->debug_id,
1528 target_proc->pid, target_node->debug_id,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001529 (u64)tr->data.ptr.buffer,
1530 (u64)tr->data.ptr.offsets,
1531 (u64)tr->data_size, (u64)tr->offsets_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001532
1533 if (!reply && !(tr->flags & TF_ONE_WAY))
1534 t->from = thread;
1535 else
1536 t->from = NULL;
Tair Rzayev57bab7c2014-05-31 22:43:34 +03001537 t->sender_euid = task_euid(proc->tsk);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001538 t->to_proc = target_proc;
1539 t->to_thread = target_thread;
1540 t->code = tr->code;
1541 t->flags = tr->flags;
1542 t->priority = task_nice(current);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07001543
1544 trace_binder_transaction(reply, t, target_node);
1545
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001546 t->buffer = binder_alloc_buf(target_proc, tr->data_size,
1547 tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
1548 if (t->buffer == NULL) {
1549 return_error = BR_FAILED_REPLY;
1550 goto err_binder_alloc_buf_failed;
1551 }
1552 t->buffer->allow_user_free = 0;
1553 t->buffer->debug_id = t->debug_id;
1554 t->buffer->transaction = t;
1555 t->buffer->target_node = target_node;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07001556 trace_binder_transaction_alloc_buf(t->buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001557 if (target_node)
1558 binder_inc_node(target_node, 1, 0, NULL);
1559
Arve Hjønnevågda498892014-02-21 14:40:26 -08001560 offp = (binder_size_t *)(t->buffer->data +
1561 ALIGN(tr->data_size, sizeof(void *)));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001562
Arve Hjønnevågda498892014-02-21 14:40:26 -08001563 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
1564 tr->data.ptr.buffer, tr->data_size)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301565 binder_user_error("%d:%d got transaction with invalid data ptr\n",
1566 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001567 return_error = BR_FAILED_REPLY;
1568 goto err_copy_data_failed;
1569 }
Arve Hjønnevågda498892014-02-21 14:40:26 -08001570 if (copy_from_user(offp, (const void __user *)(uintptr_t)
1571 tr->data.ptr.offsets, tr->offsets_size)) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301572 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
1573 proc->pid, thread->pid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001574 return_error = BR_FAILED_REPLY;
1575 goto err_copy_data_failed;
1576 }
Arve Hjønnevågda498892014-02-21 14:40:26 -08001577 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
1578 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
1579 proc->pid, thread->pid, (u64)tr->offsets_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001580 return_error = BR_FAILED_REPLY;
1581 goto err_bad_offset;
1582 }
1583 off_end = (void *)offp + tr->offsets_size;
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08001584 off_min = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001585 for (; offp < off_end; offp++) {
Martijn Coenenfeba3902017-02-03 14:40:45 -08001586 struct binder_object_header *hdr;
1587 size_t object_size = binder_validate_object(t->buffer, *offp);
Seunghun Lee10f62862014-05-01 01:30:23 +09001588
Martijn Coenenfeba3902017-02-03 14:40:45 -08001589 if (object_size == 0 || *offp < off_min) {
1590 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
Arve Hjønnevåg212265e2016-02-09 21:05:32 -08001591 proc->pid, thread->pid, (u64)*offp,
1592 (u64)off_min,
Martijn Coenenfeba3902017-02-03 14:40:45 -08001593 (u64)t->buffer->data_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001594 return_error = BR_FAILED_REPLY;
1595 goto err_bad_offset;
1596 }
Martijn Coenenfeba3902017-02-03 14:40:45 -08001597
1598 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
1599 off_min = *offp + object_size;
1600 switch (hdr->type) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001601 case BINDER_TYPE_BINDER:
1602 case BINDER_TYPE_WEAK_BINDER: {
Martijn Coenenfeba3902017-02-03 14:40:45 -08001603 struct flat_binder_object *fp;
1604 struct binder_node *node;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001605 struct binder_ref *ref;
Seunghun Lee10f62862014-05-01 01:30:23 +09001606
Martijn Coenenfeba3902017-02-03 14:40:45 -08001607 fp = to_flat_binder_object(hdr);
1608 node = binder_get_node(proc, fp->binder);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001609 if (node == NULL) {
1610 node = binder_new_node(proc, fp->binder, fp->cookie);
1611 if (node == NULL) {
1612 return_error = BR_FAILED_REPLY;
1613 goto err_binder_new_node_failed;
1614 }
1615 node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1616 node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1617 }
1618 if (fp->cookie != node->cookie) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08001619 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001620 proc->pid, thread->pid,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001621 (u64)fp->binder, node->debug_id,
1622 (u64)fp->cookie, (u64)node->cookie);
Christian Engelmayer7d420432014-05-07 21:44:53 +02001623 return_error = BR_FAILED_REPLY;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001624 goto err_binder_get_ref_for_node_failed;
1625 }
Stephen Smalley79af7302015-01-21 10:54:10 -05001626 if (security_binder_transfer_binder(proc->tsk,
1627 target_proc->tsk)) {
1628 return_error = BR_FAILED_REPLY;
1629 goto err_binder_get_ref_for_node_failed;
1630 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001631 ref = binder_get_ref_for_node(target_proc, node);
1632 if (ref == NULL) {
1633 return_error = BR_FAILED_REPLY;
1634 goto err_binder_get_ref_for_node_failed;
1635 }
Martijn Coenenfeba3902017-02-03 14:40:45 -08001636 if (hdr->type == BINDER_TYPE_BINDER)
1637 hdr->type = BINDER_TYPE_HANDLE;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001638 else
Martijn Coenenfeba3902017-02-03 14:40:45 -08001639 hdr->type = BINDER_TYPE_WEAK_HANDLE;
Arve Hjønnevåg4afb6042016-10-24 15:20:30 +02001640 fp->binder = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001641 fp->handle = ref->desc;
Arve Hjønnevåg4afb6042016-10-24 15:20:30 +02001642 fp->cookie = 0;
Martijn Coenenfeba3902017-02-03 14:40:45 -08001643 binder_inc_ref(ref, hdr->type == BINDER_TYPE_HANDLE,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001644 &thread->todo);
1645
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07001646 trace_binder_transaction_node_to_ref(t, node, ref);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001647 binder_debug(BINDER_DEBUG_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001648 " node %d u%016llx -> ref %d desc %d\n",
1649 node->debug_id, (u64)node->ptr,
1650 ref->debug_id, ref->desc);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001651 } break;
1652 case BINDER_TYPE_HANDLE:
1653 case BINDER_TYPE_WEAK_HANDLE: {
Martijn Coenenfeba3902017-02-03 14:40:45 -08001654 struct flat_binder_object *fp;
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001655 struct binder_ref *ref;
1656
Martijn Coenenfeba3902017-02-03 14:40:45 -08001657 fp = to_flat_binder_object(hdr);
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001658 ref = binder_get_ref(proc, fp->handle,
Martijn Coenenfeba3902017-02-03 14:40:45 -08001659 hdr->type == BINDER_TYPE_HANDLE);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001660 if (ref == NULL) {
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01001661 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
Anmol Sarma56b468f2012-10-30 22:35:43 +05301662 proc->pid,
1663 thread->pid, fp->handle);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001664 return_error = BR_FAILED_REPLY;
1665 goto err_binder_get_ref_failed;
1666 }
Stephen Smalley79af7302015-01-21 10:54:10 -05001667 if (security_binder_transfer_binder(proc->tsk,
1668 target_proc->tsk)) {
1669 return_error = BR_FAILED_REPLY;
1670 goto err_binder_get_ref_failed;
1671 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001672 if (ref->node->proc == target_proc) {
Martijn Coenenfeba3902017-02-03 14:40:45 -08001673 if (hdr->type == BINDER_TYPE_HANDLE)
1674 hdr->type = BINDER_TYPE_BINDER;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001675 else
Martijn Coenenfeba3902017-02-03 14:40:45 -08001676 hdr->type = BINDER_TYPE_WEAK_BINDER;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001677 fp->binder = ref->node->ptr;
1678 fp->cookie = ref->node->cookie;
Martijn Coenenfeba3902017-02-03 14:40:45 -08001679 binder_inc_node(ref->node,
1680 hdr->type == BINDER_TYPE_BINDER,
1681 0, NULL);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07001682 trace_binder_transaction_ref_to_node(t, ref);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001683 binder_debug(BINDER_DEBUG_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001684 " ref %d desc %d -> node %d u%016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001685 ref->debug_id, ref->desc, ref->node->debug_id,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001686 (u64)ref->node->ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001687 } else {
1688 struct binder_ref *new_ref;
Seunghun Lee10f62862014-05-01 01:30:23 +09001689
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001690 new_ref = binder_get_ref_for_node(target_proc, ref->node);
1691 if (new_ref == NULL) {
1692 return_error = BR_FAILED_REPLY;
1693 goto err_binder_get_ref_for_node_failed;
1694 }
Arve Hjønnevåg4afb6042016-10-24 15:20:30 +02001695 fp->binder = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001696 fp->handle = new_ref->desc;
Arve Hjønnevåg4afb6042016-10-24 15:20:30 +02001697 fp->cookie = 0;
Martijn Coenenfeba3902017-02-03 14:40:45 -08001698 binder_inc_ref(new_ref,
1699 hdr->type == BINDER_TYPE_HANDLE,
1700 NULL);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07001701 trace_binder_transaction_ref_to_ref(t, ref,
1702 new_ref);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001703 binder_debug(BINDER_DEBUG_TRANSACTION,
1704 " ref %d desc %d -> ref %d desc %d (node %d)\n",
1705 ref->debug_id, ref->desc, new_ref->debug_id,
1706 new_ref->desc, ref->node->debug_id);
1707 }
1708 } break;
1709
1710 case BINDER_TYPE_FD: {
1711 int target_fd;
1712 struct file *file;
Martijn Coenenfeba3902017-02-03 14:40:45 -08001713 struct binder_fd_object *fp = to_binder_fd_object(hdr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001714
1715 if (reply) {
1716 if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01001717 binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n",
Martijn Coenenfeba3902017-02-03 14:40:45 -08001718 proc->pid, thread->pid, fp->fd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001719 return_error = BR_FAILED_REPLY;
1720 goto err_fd_not_allowed;
1721 }
1722 } else if (!target_node->accept_fds) {
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01001723 binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n",
Martijn Coenenfeba3902017-02-03 14:40:45 -08001724 proc->pid, thread->pid, fp->fd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001725 return_error = BR_FAILED_REPLY;
1726 goto err_fd_not_allowed;
1727 }
1728
Martijn Coenenfeba3902017-02-03 14:40:45 -08001729 file = fget(fp->fd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001730 if (file == NULL) {
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01001731 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
Martijn Coenenfeba3902017-02-03 14:40:45 -08001732 proc->pid, thread->pid, fp->fd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001733 return_error = BR_FAILED_REPLY;
1734 goto err_fget_failed;
1735 }
Stephen Smalley79af7302015-01-21 10:54:10 -05001736 if (security_binder_transfer_file(proc->tsk,
1737 target_proc->tsk,
1738 file) < 0) {
1739 fput(file);
1740 return_error = BR_FAILED_REPLY;
1741 goto err_get_unused_fd_failed;
1742 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001743 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
1744 if (target_fd < 0) {
1745 fput(file);
1746 return_error = BR_FAILED_REPLY;
1747 goto err_get_unused_fd_failed;
1748 }
1749 task_fd_install(target_proc, target_fd, file);
Martijn Coenenfeba3902017-02-03 14:40:45 -08001750 trace_binder_transaction_fd(t, fp->fd, target_fd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001751 binder_debug(BINDER_DEBUG_TRANSACTION,
Martijn Coenenfeba3902017-02-03 14:40:45 -08001752 " fd %d -> %d\n", fp->fd,
1753 target_fd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001754 /* TODO: fput? */
Martijn Coenenfeba3902017-02-03 14:40:45 -08001755 fp->pad_binder = 0;
1756 fp->fd = target_fd;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001757 } break;
1758
1759 default:
Serban Constantinescu64dcfe62013-07-04 10:54:48 +01001760 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
Martijn Coenenfeba3902017-02-03 14:40:45 -08001761 proc->pid, thread->pid, hdr->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001762 return_error = BR_FAILED_REPLY;
1763 goto err_bad_object_type;
1764 }
1765 }
1766 if (reply) {
1767 BUG_ON(t->buffer->async_transaction != 0);
1768 binder_pop_transaction(target_thread, in_reply_to);
1769 } else if (!(t->flags & TF_ONE_WAY)) {
1770 BUG_ON(t->buffer->async_transaction != 0);
1771 t->need_reply = 1;
1772 t->from_parent = thread->transaction_stack;
1773 thread->transaction_stack = t;
1774 } else {
1775 BUG_ON(target_node == NULL);
1776 BUG_ON(t->buffer->async_transaction != 1);
1777 if (target_node->has_async_transaction) {
1778 target_list = &target_node->async_todo;
1779 target_wait = NULL;
1780 } else
1781 target_node->has_async_transaction = 1;
1782 }
1783 t->work.type = BINDER_WORK_TRANSACTION;
1784 list_add_tail(&t->work.entry, target_list);
1785 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
1786 list_add_tail(&tcomplete->entry, &thread->todo);
1787 if (target_wait)
1788 wake_up_interruptible(target_wait);
1789 return;
1790
1791err_get_unused_fd_failed:
1792err_fget_failed:
1793err_fd_not_allowed:
1794err_binder_get_ref_for_node_failed:
1795err_binder_get_ref_failed:
1796err_binder_new_node_failed:
1797err_bad_object_type:
1798err_bad_offset:
1799err_copy_data_failed:
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07001800 trace_binder_transaction_failed_buffer_release(t->buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001801 binder_transaction_buffer_release(target_proc, t->buffer, offp);
1802 t->buffer->transaction = NULL;
1803 binder_free_buf(target_proc, t->buffer);
1804err_binder_alloc_buf_failed:
1805 kfree(tcomplete);
1806 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
1807err_alloc_tcomplete_failed:
1808 kfree(t);
1809 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1810err_alloc_t_failed:
1811err_bad_call_stack:
1812err_empty_call_stack:
1813err_dead_binder:
1814err_invalid_target_handle:
1815err_no_context_mgr_node:
1816 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001817 "%d:%d transaction failed %d, size %lld-%lld\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001818 proc->pid, thread->pid, return_error,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001819 (u64)tr->data_size, (u64)tr->offsets_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001820
1821 {
1822 struct binder_transaction_log_entry *fe;
Seunghun Lee10f62862014-05-01 01:30:23 +09001823
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001824 fe = binder_transaction_log_add(&binder_transaction_log_failed);
1825 *fe = *e;
1826 }
1827
1828 BUG_ON(thread->return_error != BR_OK);
1829 if (in_reply_to) {
1830 thread->return_error = BR_TRANSACTION_COMPLETE;
1831 binder_send_failed_reply(in_reply_to, return_error);
1832 } else
1833 thread->return_error = return_error;
1834}
1835
Bojan Prtvarfb07ebc2013-09-02 08:18:40 +02001836static int binder_thread_write(struct binder_proc *proc,
1837 struct binder_thread *thread,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001838 binder_uintptr_t binder_buffer, size_t size,
1839 binder_size_t *consumed)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001840{
1841 uint32_t cmd;
Arve Hjønnevågda498892014-02-21 14:40:26 -08001842 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001843 void __user *ptr = buffer + *consumed;
1844 void __user *end = buffer + size;
1845
1846 while (ptr < end && thread->return_error == BR_OK) {
1847 if (get_user(cmd, (uint32_t __user *)ptr))
1848 return -EFAULT;
1849 ptr += sizeof(uint32_t);
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07001850 trace_binder_command(cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001851 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
1852 binder_stats.bc[_IOC_NR(cmd)]++;
1853 proc->stats.bc[_IOC_NR(cmd)]++;
1854 thread->stats.bc[_IOC_NR(cmd)]++;
1855 }
1856 switch (cmd) {
1857 case BC_INCREFS:
1858 case BC_ACQUIRE:
1859 case BC_RELEASE:
1860 case BC_DECREFS: {
1861 uint32_t target;
1862 struct binder_ref *ref;
1863 const char *debug_string;
1864
1865 if (get_user(target, (uint32_t __user *)ptr))
1866 return -EFAULT;
1867 ptr += sizeof(uint32_t);
1868 if (target == 0 && binder_context_mgr_node &&
1869 (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
1870 ref = binder_get_ref_for_node(proc,
1871 binder_context_mgr_node);
1872 if (ref->desc != target) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301873 binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001874 proc->pid, thread->pid,
1875 ref->desc);
1876 }
1877 } else
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02001878 ref = binder_get_ref(proc, target,
1879 cmd == BC_ACQUIRE ||
1880 cmd == BC_RELEASE);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001881 if (ref == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301882 binder_user_error("%d:%d refcount change on invalid ref %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001883 proc->pid, thread->pid, target);
1884 break;
1885 }
1886 switch (cmd) {
1887 case BC_INCREFS:
1888 debug_string = "IncRefs";
1889 binder_inc_ref(ref, 0, NULL);
1890 break;
1891 case BC_ACQUIRE:
1892 debug_string = "Acquire";
1893 binder_inc_ref(ref, 1, NULL);
1894 break;
1895 case BC_RELEASE:
1896 debug_string = "Release";
1897 binder_dec_ref(ref, 1);
1898 break;
1899 case BC_DECREFS:
1900 default:
1901 debug_string = "DecRefs";
1902 binder_dec_ref(ref, 0);
1903 break;
1904 }
1905 binder_debug(BINDER_DEBUG_USER_REFS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301906 "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001907 proc->pid, thread->pid, debug_string, ref->debug_id,
1908 ref->desc, ref->strong, ref->weak, ref->node->debug_id);
1909 break;
1910 }
1911 case BC_INCREFS_DONE:
1912 case BC_ACQUIRE_DONE: {
Arve Hjønnevågda498892014-02-21 14:40:26 -08001913 binder_uintptr_t node_ptr;
1914 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001915 struct binder_node *node;
1916
Arve Hjønnevågda498892014-02-21 14:40:26 -08001917 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001918 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08001919 ptr += sizeof(binder_uintptr_t);
1920 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001921 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08001922 ptr += sizeof(binder_uintptr_t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001923 node = binder_get_node(proc, node_ptr);
1924 if (node == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08001925 binder_user_error("%d:%d %s u%016llx no match\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001926 proc->pid, thread->pid,
1927 cmd == BC_INCREFS_DONE ?
1928 "BC_INCREFS_DONE" :
1929 "BC_ACQUIRE_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08001930 (u64)node_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001931 break;
1932 }
1933 if (cookie != node->cookie) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08001934 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001935 proc->pid, thread->pid,
1936 cmd == BC_INCREFS_DONE ?
1937 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08001938 (u64)node_ptr, node->debug_id,
1939 (u64)cookie, (u64)node->cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001940 break;
1941 }
1942 if (cmd == BC_ACQUIRE_DONE) {
1943 if (node->pending_strong_ref == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301944 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001945 proc->pid, thread->pid,
1946 node->debug_id);
1947 break;
1948 }
1949 node->pending_strong_ref = 0;
1950 } else {
1951 if (node->pending_weak_ref == 0) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05301952 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001953 proc->pid, thread->pid,
1954 node->debug_id);
1955 break;
1956 }
1957 node->pending_weak_ref = 0;
1958 }
1959 binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
1960 binder_debug(BINDER_DEBUG_USER_REFS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05301961 "%d:%d %s node %d ls %d lw %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001962 proc->pid, thread->pid,
1963 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
1964 node->debug_id, node->local_strong_refs, node->local_weak_refs);
1965 break;
1966 }
1967 case BC_ATTEMPT_ACQUIRE:
Anmol Sarma56b468f2012-10-30 22:35:43 +05301968 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001969 return -EINVAL;
1970 case BC_ACQUIRE_RESULT:
Anmol Sarma56b468f2012-10-30 22:35:43 +05301971 pr_err("BC_ACQUIRE_RESULT not supported\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001972 return -EINVAL;
1973
1974 case BC_FREE_BUFFER: {
Arve Hjønnevågda498892014-02-21 14:40:26 -08001975 binder_uintptr_t data_ptr;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001976 struct binder_buffer *buffer;
1977
Arve Hjønnevågda498892014-02-21 14:40:26 -08001978 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001979 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08001980 ptr += sizeof(binder_uintptr_t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001981
1982 buffer = binder_buffer_lookup(proc, data_ptr);
1983 if (buffer == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08001984 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
1985 proc->pid, thread->pid, (u64)data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001986 break;
1987 }
1988 if (!buffer->allow_user_free) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08001989 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
1990 proc->pid, thread->pid, (u64)data_ptr);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001991 break;
1992 }
1993 binder_debug(BINDER_DEBUG_FREE_BUFFER,
Arve Hjønnevågda498892014-02-21 14:40:26 -08001994 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
1995 proc->pid, thread->pid, (u64)data_ptr,
1996 buffer->debug_id,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09001997 buffer->transaction ? "active" : "finished");
1998
1999 if (buffer->transaction) {
2000 buffer->transaction->buffer = NULL;
2001 buffer->transaction = NULL;
2002 }
2003 if (buffer->async_transaction && buffer->target_node) {
2004 BUG_ON(!buffer->target_node->has_async_transaction);
2005 if (list_empty(&buffer->target_node->async_todo))
2006 buffer->target_node->has_async_transaction = 0;
2007 else
2008 list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
2009 }
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002010 trace_binder_transaction_buffer_release(buffer);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002011 binder_transaction_buffer_release(proc, buffer, NULL);
2012 binder_free_buf(proc, buffer);
2013 break;
2014 }
2015
2016 case BC_TRANSACTION:
2017 case BC_REPLY: {
2018 struct binder_transaction_data tr;
2019
2020 if (copy_from_user(&tr, ptr, sizeof(tr)))
2021 return -EFAULT;
2022 ptr += sizeof(tr);
2023 binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
2024 break;
2025 }
2026
2027 case BC_REGISTER_LOOPER:
2028 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302029 "%d:%d BC_REGISTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002030 proc->pid, thread->pid);
2031 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
2032 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05302033 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002034 proc->pid, thread->pid);
2035 } else if (proc->requested_threads == 0) {
2036 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05302037 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002038 proc->pid, thread->pid);
2039 } else {
2040 proc->requested_threads--;
2041 proc->requested_threads_started++;
2042 }
2043 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
2044 break;
2045 case BC_ENTER_LOOPER:
2046 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302047 "%d:%d BC_ENTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002048 proc->pid, thread->pid);
2049 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
2050 thread->looper |= BINDER_LOOPER_STATE_INVALID;
Anmol Sarma56b468f2012-10-30 22:35:43 +05302051 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002052 proc->pid, thread->pid);
2053 }
2054 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
2055 break;
2056 case BC_EXIT_LOOPER:
2057 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302058 "%d:%d BC_EXIT_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002059 proc->pid, thread->pid);
2060 thread->looper |= BINDER_LOOPER_STATE_EXITED;
2061 break;
2062
2063 case BC_REQUEST_DEATH_NOTIFICATION:
2064 case BC_CLEAR_DEATH_NOTIFICATION: {
2065 uint32_t target;
Arve Hjønnevågda498892014-02-21 14:40:26 -08002066 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002067 struct binder_ref *ref;
2068 struct binder_ref_death *death;
2069
2070 if (get_user(target, (uint32_t __user *)ptr))
2071 return -EFAULT;
2072 ptr += sizeof(uint32_t);
Arve Hjønnevågda498892014-02-21 14:40:26 -08002073 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002074 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08002075 ptr += sizeof(binder_uintptr_t);
Arve Hjønnevåg0a3ffab2016-10-24 15:20:29 +02002076 ref = binder_get_ref(proc, target, false);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002077 if (ref == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302078 binder_user_error("%d:%d %s invalid ref %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002079 proc->pid, thread->pid,
2080 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2081 "BC_REQUEST_DEATH_NOTIFICATION" :
2082 "BC_CLEAR_DEATH_NOTIFICATION",
2083 target);
2084 break;
2085 }
2086
2087 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002088 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002089 proc->pid, thread->pid,
2090 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2091 "BC_REQUEST_DEATH_NOTIFICATION" :
2092 "BC_CLEAR_DEATH_NOTIFICATION",
Arve Hjønnevågda498892014-02-21 14:40:26 -08002093 (u64)cookie, ref->debug_id, ref->desc,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002094 ref->strong, ref->weak, ref->node->debug_id);
2095
2096 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
2097 if (ref->death) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302098 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002099 proc->pid, thread->pid);
2100 break;
2101 }
2102 death = kzalloc(sizeof(*death), GFP_KERNEL);
2103 if (death == NULL) {
2104 thread->return_error = BR_ERROR;
2105 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302106 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002107 proc->pid, thread->pid);
2108 break;
2109 }
2110 binder_stats_created(BINDER_STAT_DEATH);
2111 INIT_LIST_HEAD(&death->work.entry);
2112 death->cookie = cookie;
2113 ref->death = death;
2114 if (ref->node->proc == NULL) {
2115 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
2116 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2117 list_add_tail(&ref->death->work.entry, &thread->todo);
2118 } else {
2119 list_add_tail(&ref->death->work.entry, &proc->todo);
2120 wake_up_interruptible(&proc->wait);
2121 }
2122 }
2123 } else {
2124 if (ref->death == NULL) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302125 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002126 proc->pid, thread->pid);
2127 break;
2128 }
2129 death = ref->death;
2130 if (death->cookie != cookie) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08002131 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002132 proc->pid, thread->pid,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002133 (u64)death->cookie,
2134 (u64)cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002135 break;
2136 }
2137 ref->death = NULL;
2138 if (list_empty(&death->work.entry)) {
2139 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2140 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2141 list_add_tail(&death->work.entry, &thread->todo);
2142 } else {
2143 list_add_tail(&death->work.entry, &proc->todo);
2144 wake_up_interruptible(&proc->wait);
2145 }
2146 } else {
2147 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
2148 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
2149 }
2150 }
2151 } break;
2152 case BC_DEAD_BINDER_DONE: {
2153 struct binder_work *w;
Arve Hjønnevågda498892014-02-21 14:40:26 -08002154 binder_uintptr_t cookie;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002155 struct binder_ref_death *death = NULL;
Seunghun Lee10f62862014-05-01 01:30:23 +09002156
Arve Hjønnevågda498892014-02-21 14:40:26 -08002157 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002158 return -EFAULT;
2159
Lisa Du7a64cd82016-02-17 09:32:52 +08002160 ptr += sizeof(cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002161 list_for_each_entry(w, &proc->delivered_death, entry) {
2162 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
Seunghun Lee10f62862014-05-01 01:30:23 +09002163
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002164 if (tmp_death->cookie == cookie) {
2165 death = tmp_death;
2166 break;
2167 }
2168 }
2169 binder_debug(BINDER_DEBUG_DEAD_BINDER,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002170 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
2171 proc->pid, thread->pid, (u64)cookie,
2172 death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002173 if (death == NULL) {
Arve Hjønnevågda498892014-02-21 14:40:26 -08002174 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
2175 proc->pid, thread->pid, (u64)cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002176 break;
2177 }
2178
2179 list_del_init(&death->work.entry);
2180 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
2181 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2182 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2183 list_add_tail(&death->work.entry, &thread->todo);
2184 } else {
2185 list_add_tail(&death->work.entry, &proc->todo);
2186 wake_up_interruptible(&proc->wait);
2187 }
2188 }
2189 } break;
2190
2191 default:
Anmol Sarma56b468f2012-10-30 22:35:43 +05302192 pr_err("%d:%d unknown command %d\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002193 proc->pid, thread->pid, cmd);
2194 return -EINVAL;
2195 }
2196 *consumed = ptr - buffer;
2197 }
2198 return 0;
2199}
2200
Bojan Prtvarfb07ebc2013-09-02 08:18:40 +02002201static void binder_stat_br(struct binder_proc *proc,
2202 struct binder_thread *thread, uint32_t cmd)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002203{
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002204 trace_binder_return(cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002205 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
2206 binder_stats.br[_IOC_NR(cmd)]++;
2207 proc->stats.br[_IOC_NR(cmd)]++;
2208 thread->stats.br[_IOC_NR(cmd)]++;
2209 }
2210}
2211
2212static int binder_has_proc_work(struct binder_proc *proc,
2213 struct binder_thread *thread)
2214{
2215 return !list_empty(&proc->todo) ||
2216 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2217}
2218
2219static int binder_has_thread_work(struct binder_thread *thread)
2220{
2221 return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
2222 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2223}
2224
2225static int binder_thread_read(struct binder_proc *proc,
2226 struct binder_thread *thread,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002227 binder_uintptr_t binder_buffer, size_t size,
2228 binder_size_t *consumed, int non_block)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002229{
Arve Hjønnevågda498892014-02-21 14:40:26 -08002230 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002231 void __user *ptr = buffer + *consumed;
2232 void __user *end = buffer + size;
2233
2234 int ret = 0;
2235 int wait_for_proc_work;
2236
2237 if (*consumed == 0) {
2238 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
2239 return -EFAULT;
2240 ptr += sizeof(uint32_t);
2241 }
2242
2243retry:
2244 wait_for_proc_work = thread->transaction_stack == NULL &&
2245 list_empty(&thread->todo);
2246
2247 if (thread->return_error != BR_OK && ptr < end) {
2248 if (thread->return_error2 != BR_OK) {
2249 if (put_user(thread->return_error2, (uint32_t __user *)ptr))
2250 return -EFAULT;
2251 ptr += sizeof(uint32_t);
Arve Hjønnevåg89334ab2012-10-16 15:29:52 -07002252 binder_stat_br(proc, thread, thread->return_error2);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002253 if (ptr == end)
2254 goto done;
2255 thread->return_error2 = BR_OK;
2256 }
2257 if (put_user(thread->return_error, (uint32_t __user *)ptr))
2258 return -EFAULT;
2259 ptr += sizeof(uint32_t);
Arve Hjønnevåg89334ab2012-10-16 15:29:52 -07002260 binder_stat_br(proc, thread, thread->return_error);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002261 thread->return_error = BR_OK;
2262 goto done;
2263 }
2264
2265
2266 thread->looper |= BINDER_LOOPER_STATE_WAITING;
2267 if (wait_for_proc_work)
2268 proc->ready_threads++;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002269
2270 binder_unlock(__func__);
2271
2272 trace_binder_wait_for_work(wait_for_proc_work,
2273 !!thread->transaction_stack,
2274 !list_empty(&thread->todo));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002275 if (wait_for_proc_work) {
2276 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2277 BINDER_LOOPER_STATE_ENTERED))) {
Anmol Sarma56b468f2012-10-30 22:35:43 +05302278 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002279 proc->pid, thread->pid, thread->looper);
2280 wait_event_interruptible(binder_user_error_wait,
2281 binder_stop_on_user_error < 2);
2282 }
2283 binder_set_nice(proc->default_priority);
2284 if (non_block) {
2285 if (!binder_has_proc_work(proc, thread))
2286 ret = -EAGAIN;
2287 } else
Colin Crosse2610b22013-05-06 23:50:15 +00002288 ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002289 } else {
2290 if (non_block) {
2291 if (!binder_has_thread_work(thread))
2292 ret = -EAGAIN;
2293 } else
Colin Crosse2610b22013-05-06 23:50:15 +00002294 ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002295 }
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002296
2297 binder_lock(__func__);
2298
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002299 if (wait_for_proc_work)
2300 proc->ready_threads--;
2301 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
2302
2303 if (ret)
2304 return ret;
2305
2306 while (1) {
2307 uint32_t cmd;
2308 struct binder_transaction_data tr;
2309 struct binder_work *w;
2310 struct binder_transaction *t = NULL;
2311
Dmitry Voytik395262a2014-09-08 18:16:34 +04002312 if (!list_empty(&thread->todo)) {
2313 w = list_first_entry(&thread->todo, struct binder_work,
2314 entry);
2315 } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
2316 w = list_first_entry(&proc->todo, struct binder_work,
2317 entry);
2318 } else {
2319 /* no data added */
2320 if (ptr - buffer == 4 &&
2321 !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002322 goto retry;
2323 break;
2324 }
2325
2326 if (end - ptr < sizeof(tr) + 4)
2327 break;
2328
2329 switch (w->type) {
2330 case BINDER_WORK_TRANSACTION: {
2331 t = container_of(w, struct binder_transaction, work);
2332 } break;
2333 case BINDER_WORK_TRANSACTION_COMPLETE: {
2334 cmd = BR_TRANSACTION_COMPLETE;
2335 if (put_user(cmd, (uint32_t __user *)ptr))
2336 return -EFAULT;
2337 ptr += sizeof(uint32_t);
2338
2339 binder_stat_br(proc, thread, cmd);
2340 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302341 "%d:%d BR_TRANSACTION_COMPLETE\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002342 proc->pid, thread->pid);
2343
2344 list_del(&w->entry);
2345 kfree(w);
2346 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2347 } break;
2348 case BINDER_WORK_NODE: {
2349 struct binder_node *node = container_of(w, struct binder_node, work);
2350 uint32_t cmd = BR_NOOP;
2351 const char *cmd_name;
2352 int strong = node->internal_strong_refs || node->local_strong_refs;
2353 int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
Seunghun Lee10f62862014-05-01 01:30:23 +09002354
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002355 if (weak && !node->has_weak_ref) {
2356 cmd = BR_INCREFS;
2357 cmd_name = "BR_INCREFS";
2358 node->has_weak_ref = 1;
2359 node->pending_weak_ref = 1;
2360 node->local_weak_refs++;
2361 } else if (strong && !node->has_strong_ref) {
2362 cmd = BR_ACQUIRE;
2363 cmd_name = "BR_ACQUIRE";
2364 node->has_strong_ref = 1;
2365 node->pending_strong_ref = 1;
2366 node->local_strong_refs++;
2367 } else if (!strong && node->has_strong_ref) {
2368 cmd = BR_RELEASE;
2369 cmd_name = "BR_RELEASE";
2370 node->has_strong_ref = 0;
2371 } else if (!weak && node->has_weak_ref) {
2372 cmd = BR_DECREFS;
2373 cmd_name = "BR_DECREFS";
2374 node->has_weak_ref = 0;
2375 }
2376 if (cmd != BR_NOOP) {
2377 if (put_user(cmd, (uint32_t __user *)ptr))
2378 return -EFAULT;
2379 ptr += sizeof(uint32_t);
Arve Hjønnevågda498892014-02-21 14:40:26 -08002380 if (put_user(node->ptr,
2381 (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002382 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08002383 ptr += sizeof(binder_uintptr_t);
2384 if (put_user(node->cookie,
2385 (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002386 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08002387 ptr += sizeof(binder_uintptr_t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002388
2389 binder_stat_br(proc, thread, cmd);
2390 binder_debug(BINDER_DEBUG_USER_REFS,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002391 "%d:%d %s %d u%016llx c%016llx\n",
2392 proc->pid, thread->pid, cmd_name,
2393 node->debug_id,
2394 (u64)node->ptr, (u64)node->cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002395 } else {
2396 list_del_init(&w->entry);
2397 if (!weak && !strong) {
2398 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002399 "%d:%d node %d u%016llx c%016llx deleted\n",
2400 proc->pid, thread->pid,
2401 node->debug_id,
2402 (u64)node->ptr,
2403 (u64)node->cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002404 rb_erase(&node->rb_node, &proc->nodes);
2405 kfree(node);
2406 binder_stats_deleted(BINDER_STAT_NODE);
2407 } else {
2408 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002409 "%d:%d node %d u%016llx c%016llx state unchanged\n",
2410 proc->pid, thread->pid,
2411 node->debug_id,
2412 (u64)node->ptr,
2413 (u64)node->cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002414 }
2415 }
2416 } break;
2417 case BINDER_WORK_DEAD_BINDER:
2418 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2419 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2420 struct binder_ref_death *death;
2421 uint32_t cmd;
2422
2423 death = container_of(w, struct binder_ref_death, work);
2424 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
2425 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
2426 else
2427 cmd = BR_DEAD_BINDER;
2428 if (put_user(cmd, (uint32_t __user *)ptr))
2429 return -EFAULT;
2430 ptr += sizeof(uint32_t);
Arve Hjønnevågda498892014-02-21 14:40:26 -08002431 if (put_user(death->cookie,
2432 (binder_uintptr_t __user *)ptr))
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002433 return -EFAULT;
Arve Hjønnevågda498892014-02-21 14:40:26 -08002434 ptr += sizeof(binder_uintptr_t);
Arve Hjønnevåg89334ab2012-10-16 15:29:52 -07002435 binder_stat_br(proc, thread, cmd);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002436 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002437 "%d:%d %s %016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002438 proc->pid, thread->pid,
2439 cmd == BR_DEAD_BINDER ?
2440 "BR_DEAD_BINDER" :
2441 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
Arve Hjønnevågda498892014-02-21 14:40:26 -08002442 (u64)death->cookie);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002443
2444 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
2445 list_del(&w->entry);
2446 kfree(death);
2447 binder_stats_deleted(BINDER_STAT_DEATH);
2448 } else
2449 list_move(&w->entry, &proc->delivered_death);
2450 if (cmd == BR_DEAD_BINDER)
2451 goto done; /* DEAD_BINDER notifications can cause transactions */
2452 } break;
2453 }
2454
2455 if (!t)
2456 continue;
2457
2458 BUG_ON(t->buffer == NULL);
2459 if (t->buffer->target_node) {
2460 struct binder_node *target_node = t->buffer->target_node;
Seunghun Lee10f62862014-05-01 01:30:23 +09002461
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002462 tr.target.ptr = target_node->ptr;
2463 tr.cookie = target_node->cookie;
2464 t->saved_priority = task_nice(current);
2465 if (t->priority < target_node->min_priority &&
2466 !(t->flags & TF_ONE_WAY))
2467 binder_set_nice(t->priority);
2468 else if (!(t->flags & TF_ONE_WAY) ||
2469 t->saved_priority > target_node->min_priority)
2470 binder_set_nice(target_node->min_priority);
2471 cmd = BR_TRANSACTION;
2472 } else {
Arve Hjønnevågda498892014-02-21 14:40:26 -08002473 tr.target.ptr = 0;
2474 tr.cookie = 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002475 cmd = BR_REPLY;
2476 }
2477 tr.code = t->code;
2478 tr.flags = t->flags;
Eric W. Biederman4a2ebb92012-05-25 18:34:53 -06002479 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002480
2481 if (t->from) {
2482 struct task_struct *sender = t->from->proc->tsk;
Seunghun Lee10f62862014-05-01 01:30:23 +09002483
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002484 tr.sender_pid = task_tgid_nr_ns(sender,
Eric W. Biederman17cf22c2010-03-02 14:51:53 -08002485 task_active_pid_ns(current));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002486 } else {
2487 tr.sender_pid = 0;
2488 }
2489
2490 tr.data_size = t->buffer->data_size;
2491 tr.offsets_size = t->buffer->offsets_size;
Arve Hjønnevågda498892014-02-21 14:40:26 -08002492 tr.data.ptr.buffer = (binder_uintptr_t)(
2493 (uintptr_t)t->buffer->data +
2494 proc->user_buffer_offset);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002495 tr.data.ptr.offsets = tr.data.ptr.buffer +
2496 ALIGN(t->buffer->data_size,
2497 sizeof(void *));
2498
2499 if (put_user(cmd, (uint32_t __user *)ptr))
2500 return -EFAULT;
2501 ptr += sizeof(uint32_t);
2502 if (copy_to_user(ptr, &tr, sizeof(tr)))
2503 return -EFAULT;
2504 ptr += sizeof(tr);
2505
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002506 trace_binder_transaction_received(t);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002507 binder_stat_br(proc, thread, cmd);
2508 binder_debug(BINDER_DEBUG_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002509 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002510 proc->pid, thread->pid,
2511 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
2512 "BR_REPLY",
2513 t->debug_id, t->from ? t->from->proc->pid : 0,
2514 t->from ? t->from->pid : 0, cmd,
2515 t->buffer->data_size, t->buffer->offsets_size,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002516 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002517
2518 list_del(&t->work.entry);
2519 t->buffer->allow_user_free = 1;
2520 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
2521 t->to_parent = thread->transaction_stack;
2522 t->to_thread = thread;
2523 thread->transaction_stack = t;
2524 } else {
2525 t->buffer->transaction = NULL;
2526 kfree(t);
2527 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2528 }
2529 break;
2530 }
2531
2532done:
2533
2534 *consumed = ptr - buffer;
2535 if (proc->requested_threads + proc->ready_threads == 0 &&
2536 proc->requested_threads_started < proc->max_threads &&
2537 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2538 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
2539 /*spawn a new thread if we leave this out */) {
2540 proc->requested_threads++;
2541 binder_debug(BINDER_DEBUG_THREADS,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302542 "%d:%d BR_SPAWN_LOOPER\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002543 proc->pid, thread->pid);
2544 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
2545 return -EFAULT;
Arve Hjønnevåg89334ab2012-10-16 15:29:52 -07002546 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002547 }
2548 return 0;
2549}
2550
2551static void binder_release_work(struct list_head *list)
2552{
2553 struct binder_work *w;
Seunghun Lee10f62862014-05-01 01:30:23 +09002554
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002555 while (!list_empty(list)) {
2556 w = list_first_entry(list, struct binder_work, entry);
2557 list_del_init(&w->entry);
2558 switch (w->type) {
2559 case BINDER_WORK_TRANSACTION: {
2560 struct binder_transaction *t;
2561
2562 t = container_of(w, struct binder_transaction, work);
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07002563 if (t->buffer->target_node &&
2564 !(t->flags & TF_ONE_WAY)) {
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002565 binder_send_failed_reply(t, BR_DEAD_REPLY);
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07002566 } else {
2567 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302568 "undelivered transaction %d\n",
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07002569 t->debug_id);
2570 t->buffer->transaction = NULL;
2571 kfree(t);
2572 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2573 }
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002574 } break;
2575 case BINDER_WORK_TRANSACTION_COMPLETE: {
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07002576 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302577 "undelivered TRANSACTION_COMPLETE\n");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002578 kfree(w);
2579 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2580 } break;
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07002581 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2582 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2583 struct binder_ref_death *death;
2584
2585 death = container_of(w, struct binder_ref_death, work);
2586 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
Arve Hjønnevågda498892014-02-21 14:40:26 -08002587 "undelivered death notification, %016llx\n",
2588 (u64)death->cookie);
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07002589 kfree(death);
2590 binder_stats_deleted(BINDER_STAT_DEATH);
2591 } break;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002592 default:
Anmol Sarma56b468f2012-10-30 22:35:43 +05302593 pr_err("unexpected work type, %d, not freed\n",
Arve Hjønnevåg675d66b2012-10-16 15:29:54 -07002594 w->type);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002595 break;
2596 }
2597 }
2598
2599}
2600
2601static struct binder_thread *binder_get_thread(struct binder_proc *proc)
2602{
2603 struct binder_thread *thread = NULL;
2604 struct rb_node *parent = NULL;
2605 struct rb_node **p = &proc->threads.rb_node;
2606
2607 while (*p) {
2608 parent = *p;
2609 thread = rb_entry(parent, struct binder_thread, rb_node);
2610
2611 if (current->pid < thread->pid)
2612 p = &(*p)->rb_left;
2613 else if (current->pid > thread->pid)
2614 p = &(*p)->rb_right;
2615 else
2616 break;
2617 }
2618 if (*p == NULL) {
2619 thread = kzalloc(sizeof(*thread), GFP_KERNEL);
2620 if (thread == NULL)
2621 return NULL;
2622 binder_stats_created(BINDER_STAT_THREAD);
2623 thread->proc = proc;
2624 thread->pid = current->pid;
2625 init_waitqueue_head(&thread->wait);
2626 INIT_LIST_HEAD(&thread->todo);
2627 rb_link_node(&thread->rb_node, parent, p);
2628 rb_insert_color(&thread->rb_node, &proc->threads);
2629 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
2630 thread->return_error = BR_OK;
2631 thread->return_error2 = BR_OK;
2632 }
2633 return thread;
2634}
2635
/*
 * binder_free_thread() - destroy a binder_thread on thread exit/release.
 * @proc:   owning process.
 * @thread: thread record being torn down.
 *
 * Removes @thread from the proc's rb-tree, walks its transaction stack
 * severing each transaction's link to this thread, fails any reply the
 * peer is still waiting for with BR_DEAD_REPLY, releases the thread's
 * undelivered work, and frees the structure.
 *
 * Return: number of transactions that were still active on the stack.
 */
static int binder_free_thread(struct binder_proc *proc,
			      struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;

	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	/* If the top of the stack targets this thread, its sender is
	 * blocked waiting for a reply; remember it so we can fail it
	 * after the stack has been unwound.
	 */
	if (t && t->to_thread == thread)
		send_reply = t;
	while (t) {
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			     proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			/* Incoming transaction: detach the target side and
			 * orphan its buffer.
			 */
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			/* Outgoing transaction: detach the sender side. */
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();	/* stack entry not linked to this thread */
	}
	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(&thread->todo);
	kfree(thread);
	binder_stats_deleted(BINDER_STAT_THREAD);
	return active_transactions;
}
2676
2677static unsigned int binder_poll(struct file *filp,
2678 struct poll_table_struct *wait)
2679{
2680 struct binder_proc *proc = filp->private_data;
2681 struct binder_thread *thread = NULL;
2682 int wait_for_proc_work;
2683
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002684 binder_lock(__func__);
2685
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002686 thread = binder_get_thread(proc);
2687
2688 wait_for_proc_work = thread->transaction_stack == NULL &&
2689 list_empty(&thread->todo) && thread->return_error == BR_OK;
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07002690
2691 binder_unlock(__func__);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002692
2693 if (wait_for_proc_work) {
2694 if (binder_has_proc_work(proc, thread))
2695 return POLLIN;
2696 poll_wait(filp, &proc->wait, wait);
2697 if (binder_has_proc_work(proc, thread))
2698 return POLLIN;
2699 } else {
2700 if (binder_has_thread_work(thread))
2701 return POLLIN;
2702 poll_wait(filp, &thread->wait, wait);
2703 if (binder_has_thread_work(thread))
2704 return POLLIN;
2705 }
2706 return 0;
2707}
2708
/*
 * binder_ioctl_write_read() - handle the BINDER_WRITE_READ ioctl.
 * @filp:   binder file (private_data is the binder_proc).
 * @cmd:    ioctl command (used only for its size field).
 * @arg:    user pointer to a struct binder_write_read.
 * @thread: calling thread's binder_thread.
 *
 * Copies the binder_write_read descriptor in, processes the write buffer
 * then the read buffer, and copies the descriptor (with updated
 * *_consumed offsets) back out.  On a write error the read side is
 * marked unconsumed so user space can tell nothing was read.
 *
 * Return: 0 on success, -EINVAL on bad size, -EFAULT on copy failure,
 * or the error from binder_thread_write()/binder_thread_read().
 */
static int binder_ioctl_write_read(struct file *filp,
				unsigned int cmd, unsigned long arg,
				struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			/* Report the partially-consumed write back to user
			 * space; zero read_consumed since we never read.
			 */
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		/* This thread consumed work; wake another waiter if the
		 * process queue still has entries.
		 */
		if (!list_empty(&proc->todo))
			wake_up_interruptible(&proc->wait);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}
2772
/*
 * binder_ioctl_set_ctx_mgr() - handle BINDER_SET_CONTEXT_MGR.
 * @filp: binder file; its proc becomes the context manager.
 *
 * Installs the calling process as the one-and-only context manager
 * (handle 0).  Refuses if a manager node already exists (-EBUSY), if the
 * LSM denies it, or if a manager uid was previously recorded and differs
 * from the caller's euid (-EPERM).  On success creates the manager node
 * and pins it with initial strong and weak references.
 */
static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	kuid_t curr_euid = current_euid();

	if (binder_context_mgr_node != NULL) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	/* Security hook first, before any uid bookkeeping is mutated. */
	ret = security_binder_set_context_mgr(proc->tsk);
	if (ret < 0)
		goto out;
	if (uid_valid(binder_context_mgr_uid)) {
		/* A manager uid survives from an earlier registration
		 * (e.g. the previous manager died); only the same euid
		 * may re-register.
		 */
		if (!uid_eq(binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		binder_context_mgr_uid = curr_euid;
	}
	binder_context_mgr_node = binder_new_node(proc, 0, 0);
	if (binder_context_mgr_node == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	/* Keep the manager node alive independently of client refs. */
	binder_context_mgr_node->local_weak_refs++;
	binder_context_mgr_node->local_strong_refs++;
	binder_context_mgr_node->has_strong_ref = 1;
	binder_context_mgr_node->has_weak_ref = 1;
out:
	return ret;
}
2811
/*
 * binder_ioctl() - top-level ioctl dispatcher for /dev/binder.
 * @filp: binder file (private_data is the binder_proc).
 * @cmd:  ioctl command.
 * @arg:  command-specific user pointer/value.
 *
 * All commands run under the global binder lock.  On the way out the
 * thread's NEED_RETURN flag is cleared so it can block in the driver
 * again.  The wait_event_interruptible() calls park callers while the
 * driver is frozen for debugging (binder_stop_on_user_error >= 2).
 */
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	/* Reject callers whose mm differs from the one that opened the
	 * device (e.g. after fork/exec games with the fd).
	 */
	if (unlikely(current->mm != proc->vma_vm_mm)) {
		pr_err("current mm mismatch proc mm\n");
		return -EINVAL;
	}
	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	binder_lock(__func__);
	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS:
		/* NOTE(review): copies straight into proc->max_threads and
		 * returns -EINVAL on copy failure where -EFAULT is the usual
		 * convention — kept as-is to preserve the existing ABI.
		 */
		if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		break;
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_free_thread(proc, thread);
		/* thread is freed; NULL it so the err path won't touch it */
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
	binder_unlock(__func__);
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}
2893
2894static void binder_vma_open(struct vm_area_struct *vma)
2895{
2896 struct binder_proc *proc = vma->vm_private_data;
Seunghun Lee10f62862014-05-01 01:30:23 +09002897
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002898 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
Anmol Sarma56b468f2012-10-30 22:35:43 +05302899 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002900 proc->pid, vma->vm_start, vma->vm_end,
2901 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2902 (unsigned long)pgprot_val(vma->vm_page_prot));
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09002903}
2904
/*
 * binder_vma_close() - vm_ops->close hook for the binder mapping.
 * @vma: the VMA being torn down.
 *
 * Logs the teardown, clears the proc's cached vma/mm pointers so no new
 * buffer work uses the dying mapping, and defers dropping the cached
 * files_struct to the work queue.
 */
static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	proc->vma = NULL;
	proc->vma_vm_mm = NULL;
	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}
2918
/*
 * binder_vm_fault() - fault handler for the binder mapping.
 *
 * User space must never fault in binder pages directly (the driver
 * populates them itself), so any fault is answered with SIGBUS.
 */
static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
2923
/* VM operations installed on the binder buffer mapping by binder_mmap(). */
static const struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};
2929
/*
 * binder_mmap() - set up the shared buffer area for a binder_proc.
 * @filp: binder file (private_data is the binder_proc).
 * @vma:  userspace VMA to back (silently capped at 4 MB).
 *
 * Reserves a matching kernel vmalloc area, records the constant offset
 * between the user and kernel views of the buffers, allocates the page
 * bookkeeping array, maps the first page, and seeds the free-buffer
 * list with one buffer covering the whole area.  Only the task that
 * opened the device may map it, writable/copied mappings are forbidden,
 * and a proc may be mapped only once.
 *
 * Return: 0 on success, negative errno on failure (each error path logs
 * a failure_string describing what went wrong).
 */
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;
	struct binder_buffer *buffer;

	if (proc->tsk != current)
		return -EINVAL;

	/* Cap the mapping at 4 MB regardless of what was requested. */
	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	/* Buffers must not be inherited on fork nor ever made writable. */
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;

	mutex_lock(&binder_mmap_lock);
	if (proc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	/* Kernel-side address range mirroring the user mapping. */
	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	proc->buffer = area->addr;
	/* Constant delta converting kernel buffer addrs to user addrs. */
	proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
	mutex_unlock(&binder_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
	/* On VIPT-aliasing caches, align user and kernel addresses to the
	 * same cache colour by sliding the user start forward.
	 */
	if (cache_is_vipt_aliasing()) {
		while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
			pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	/* One struct page pointer per page of the mapping. */
	proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
	if (proc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	proc->buffer_size = vma->vm_end - vma->vm_start;

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	/* Populate just the first page; the rest fault in on demand via
	 * binder_update_page_range() as buffers are allocated.
	 */
	if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
		ret = -ENOMEM;
		failure_string = "alloc small buf";
		goto err_alloc_small_buf_failed;
	}
	buffer = proc->buffer;
	INIT_LIST_HEAD(&proc->buffers);
	list_add(&buffer->entry, &proc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(proc, buffer);
	/* Async transactions may use at most half the space. */
	proc->free_async_space = proc->buffer_size / 2;
	barrier();
	proc->files = get_files_struct(current);
	proc->vma = vma;
	proc->vma_vm_mm = vma->vm_mm;

	/*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
		 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
	return 0;

err_alloc_small_buf_failed:
	kfree(proc->pages);
	proc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_mmap_lock);
	vfree(proc->buffer);
	proc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
	mutex_unlock(&binder_mmap_lock);
err_bad_arg:
	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
3028
/*
 * binder_open() - file_operations open handler for /dev/binder.
 * @nodp: device inode (unused).
 * @filp: file being opened; gains the new binder_proc as private_data.
 *
 * Allocates and initializes a binder_proc for the opening process, pins
 * the task, records its mm for the later sanity check in binder_ioctl(),
 * links the proc into the global list under the binder lock, and creates
 * its per-pid debugfs entry.
 */
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	/* Hold a task reference for the lifetime of the proc. */
	get_task_struct(current);
	proc->tsk = current;
	proc->vma_vm_mm = current->mm;
	INIT_LIST_HEAD(&proc->todo);
	init_waitqueue_head(&proc->wait);
	proc->default_priority = task_nice(current);

	binder_lock(__func__);

	binder_stats_created(BINDER_STAT_PROC);
	hlist_add_head(&proc->proc_node, &binder_procs);
	/* The proc is identified by the thread-group leader's pid. */
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	filp->private_data = proc;

	binder_unlock(__func__);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
			binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
	}

	return 0;
}
3066
/*
 * binder_flush() - file_operations flush handler.
 *
 * Runs on every close of the fd; defers the actual thread wakeup to
 * binder_deferred_flush() via the deferred work queue.
 */
static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}
3075
3076static void binder_deferred_flush(struct binder_proc *proc)
3077{
3078 struct rb_node *n;
3079 int wake_count = 0;
Seunghun Lee10f62862014-05-01 01:30:23 +09003080
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003081 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
3082 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
Seunghun Lee10f62862014-05-01 01:30:23 +09003083
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003084 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
3085 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
3086 wake_up_interruptible(&thread->wait);
3087 wake_count++;
3088 }
3089 }
3090 wake_up_interruptible_all(&proc->wait);
3091
3092 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3093 "binder_flush: %d woke %d threads\n", proc->pid,
3094 wake_count);
3095}
3096
/*
 * binder_release() - file_operations release handler (last fd close).
 *
 * Removes the proc's debugfs entry and defers the heavyweight teardown
 * to binder_deferred_release() via the deferred work queue.
 */
static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);
	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}
3106
/*
 * binder_node_release() - release one node of a dying process.
 * @node: node being released.
 * @refs: running count of incoming references (accumulated by caller).
 *
 * Drops the node's pending work and async queue.  A node with no
 * remaining references is freed immediately; otherwise it is detached
 * from the proc, moved onto the global dead-nodes list, and every
 * client that registered a death notification gets a BR_DEAD_BINDER
 * queued and is woken.
 *
 * Return: the updated incoming-reference count.
 */
static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;

	list_del_init(&node->work.entry);
	binder_release_work(&node->async_todo);

	if (hlist_empty(&node->refs)) {
		/* Nobody references this node; free it now. */
		kfree(node);
		binder_stats_deleted(BINDER_STAT_NODE);

		return refs;
	}

	/* Detach from the dying proc and park on the dead-nodes list. */
	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	hlist_add_head(&node->dead_node, &binder_dead_nodes);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;

		if (!ref->death)
			continue;

		death++;

		if (list_empty(&ref->death->work.entry)) {
			ref->death->work.type = BINDER_WORK_DEAD_BINDER;
			list_add_tail(&ref->death->work.entry,
				      &ref->proc->todo);
			wake_up_interruptible(&ref->proc->wait);
		} else
			BUG();	/* death work unexpectedly already queued */
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);

	return refs;
}
3150
/*
 * binder_deferred_release() - final teardown of a binder process.
 * @proc:	binder_proc being destroyed; freed before returning
 *
 * Runs from the deferred workqueue after the fd is released; the mapping
 * and files_struct must already be gone (BUG_ON below).  Releases every
 * thread, node, ref, pending work item and buffer still owned by @proc,
 * frees the backing pages and buffer area, and finally frees @proc.
 */
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_transaction *t;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, buffers,
		active_transactions, page_count;

	BUG_ON(proc->vma);
	BUG_ON(proc->files);

	hlist_del(&proc->proc_node);

	if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
		/* the context manager itself is going away */
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		binder_context_mgr_node = NULL;
	}

	/* free all threads, counting transactions they leave behind */
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		threads++;
		active_transactions += binder_free_thread(proc, thread);
	}

	/* release all nodes this proc owns (they may become dead nodes) */
	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		rb_erase(&node->rb_node, &proc->nodes);
		incoming_refs = binder_node_release(node, incoming_refs);
	}

	/* drop every reference this proc holds on other procs' nodes */
	outgoing_refs = 0;
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_delete_ref(ref);
	}

	binder_release_work(&proc->todo);
	binder_release_work(&proc->delivered_death);

	/* reclaim transaction buffers still marked allocated */
	buffers = 0;
	while ((n = rb_first(&proc->allocated_buffers))) {
		struct binder_buffer *buffer;

		buffer = rb_entry(n, struct binder_buffer, rb_node);

		t = buffer->transaction;
		if (t) {
			/* unlink so the transaction can't touch freed memory */
			t->buffer = NULL;
			buffer->transaction = NULL;
			pr_err("release proc %d, transaction %d, not freed\n",
			       proc->pid, t->debug_id);
			/*BUG();*/
		}

		binder_free_buf(proc, buffer);
		buffers++;
	}

	binder_stats_deleted(BINDER_STAT_PROC);

	/* free any pages still backing the buffer area, then the area */
	page_count = 0;
	if (proc->pages) {
		int i;

		for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
			void *page_addr;

			if (!proc->pages[i])
				continue;

			page_addr = proc->buffer + i * PAGE_SIZE;
			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%s: %d: page %d at %p not freed\n",
				     __func__, proc->pid, i, page_addr);
			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
			__free_page(proc->pages[i]);
			page_count++;
		}
		kfree(proc->pages);
		vfree(proc->buffer);
	}

	put_task_struct(proc->tsk);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions, buffers, page_count);

	kfree(proc);
}
3255
/*
 * binder_deferred_func() - workqueue handler draining binder_deferred_list.
 *
 * Pops one binder_proc and its pending action bitmask at a time off the
 * deferred list (under binder_deferred_lock) and performs the flagged
 * actions while holding the global binder lock.  Loops until the list
 * is empty.  The files_struct, if any, is put only after binder_unlock().
 */
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;

	int defer;

	do {
		binder_lock(__func__);
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			/* list drained: exit the loop after this pass */
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			/* detach now; actual put happens below, unlocked */
			files = proc->files;
			if (files)
				proc->files = NULL;
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		binder_unlock(__func__);
		/* dropped outside the global binder lock */
		if (files)
			put_files_struct(files);
	} while (proc);
}
3296static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
3297
/*
 * binder_defer_work() - record deferred action(s) for a proc and kick the
 * worker.
 * @proc:	process the deferred action applies to
 * @defer:	BINDER_DEFERRED_* bits OR'd into proc->deferred_work
 *
 * The proc is added to binder_deferred_list only when not already queued
 * (hlist_unhashed check), so repeated requests coalesce into a single
 * pass of binder_deferred_func().
 */
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
				&binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}
3310
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003311static void print_binder_transaction(struct seq_file *m, const char *prefix,
3312 struct binder_transaction *t)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003313{
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003314 seq_printf(m,
3315 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
3316 prefix, t->debug_id, t,
3317 t->from ? t->from->proc->pid : 0,
3318 t->from ? t->from->pid : 0,
3319 t->to_proc ? t->to_proc->pid : 0,
3320 t->to_thread ? t->to_thread->pid : 0,
3321 t->code, t->flags, t->priority, t->need_reply);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003322 if (t->buffer == NULL) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003323 seq_puts(m, " buffer free\n");
3324 return;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003325 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003326 if (t->buffer->target_node)
3327 seq_printf(m, " node %d",
3328 t->buffer->target_node->debug_id);
3329 seq_printf(m, " size %zd:%zd data %p\n",
3330 t->buffer->data_size, t->buffer->offsets_size,
3331 t->buffer->data);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003332}
3333
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003334static void print_binder_buffer(struct seq_file *m, const char *prefix,
3335 struct binder_buffer *buffer)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003336{
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003337 seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
3338 prefix, buffer->debug_id, buffer->data,
3339 buffer->data_size, buffer->offsets_size,
3340 buffer->transaction ? "active" : "delivered");
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003341}
3342
/*
 * print_binder_work() - dump one binder_work item to the seq_file.
 * @prefix:		indentation used for non-transaction lines
 * @transaction_prefix:	indentation used when the work is a transaction
 * @w:			work item to print (dispatched on w->type)
 */
static void print_binder_work(struct seq_file *m, const char *prefix,
			      const char *transaction_prefix,
			      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction(m, transaction_prefix, t);
		break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}
3378
/*
 * print_binder_thread() - dump one thread's transaction stack and todo list.
 * @thread:	thread to describe
 * @print_always:	when 0, rewind m->count so that threads which printed
 *			nothing beyond the header are dropped from the output
 */
static void print_binder_thread(struct seq_file *m,
				struct binder_thread *thread,
				int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x\n", thread->pid, thread->looper);
	header_pos = m->count;
	t = thread->transaction_stack;
	/* walk the transaction stack, following the side this thread is on */
	while (t) {
		if (t->from == thread) {
			print_binder_transaction(m,
						 "    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction(m,
						 "    incoming transaction", t);
			t = t->to_parent;
		} else {
			/* stack entry references neither side: stop walking */
			print_binder_transaction(m, "    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work(m, "    ", "    pending transaction", w);
	}
	/* drop idle threads from the dump unless explicitly requested */
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}
3411
/*
 * print_binder_node() - dump one node, its ref holders and async work.
 * @node:	node to describe (live or dead)
 */
static void print_binder_node(struct seq_file *m, struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	/* first pass: count processes holding a ref on this node */
	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count);
	if (count) {
		/* second pass: list the pids of those ref holders */
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	list_for_each_entry(w, &node->async_todo, entry)
		print_binder_work(m, "    ",
				  "    pending async transaction", w);
}
3437
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003438static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003439{
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003440 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n",
3441 ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
3442 ref->node->debug_id, ref->strong, ref->weak, ref->death);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003443}
3444
/*
 * print_binder_proc() - dump the full binder state of one process.
 * @proc:	process to describe
 * @print_all:	when 0, only "interesting" state is shown (all threads are
 *		still visited but idle ones rewind themselves; only nodes
 *		with pending async transactions are printed; refs are
 *		skipped) and the whole section is rewound away if nothing
 *		beyond the "proc" header was emitted
 */
static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "proc %d\n", proc->pid);
	header_pos = m->count;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (print_all || node->has_async_transaction)
			print_binder_node(m, node);
	}
	if (print_all) {
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref(m, rb_entry(n, struct binder_ref,
						     rb_node_desc));
	}
	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, "  buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work(m, "  ", "  pending transaction", w);
	/* only report whether any delivered death exists, not each one */
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}
3484
/*
 * Printable names for the BR_* return codes, indexed by ordinal.
 * The table length is pinned to binder_stats.br by a BUILD_BUG_ON in
 * print_binder_stats(), so keep the order in sync with the protocol.
 */
static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};
3505
/*
 * Printable names for the BC_* command codes, indexed by ordinal.
 * The table length is pinned to binder_stats.bc by a BUILD_BUG_ON in
 * print_binder_stats(), so keep the order in sync with the protocol.
 */
static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE"
};
3525
/*
 * Printable names for the BINDER_STAT_* object types, indexed by ordinal.
 * The table length is pinned to binder_stats.obj_created by BUILD_BUG_ONs
 * in print_binder_stats().
 */
static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};
3535
/*
 * print_binder_stats() - print the non-zero counters from a binder_stats.
 * @prefix:	indentation prepended to every line
 * @stats:	counter block to dump (global or per-proc)
 *
 * The BUILD_BUG_ONs verify at compile time that the string tables have
 * exactly one entry per counter, so the two can never drift apart.
 */
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		if (stats->bc[i])
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], stats->bc[i]);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		if (stats->br[i])
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], stats->br[i]);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		if (stats->obj_created[i] || stats->obj_deleted[i])
			seq_printf(m, "%s%s: active %d total %d\n", prefix,
				   binder_objstat_strings[i],
				   stats->obj_created[i] - stats->obj_deleted[i],
				   stats->obj_created[i]);
	}
}
3569
/*
 * print_binder_proc_stats() - summarize one process: thread/node/ref/buffer
 * counts, pending transactions, and its per-proc counter block.
 * @proc:	process to summarize
 */
static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct rb_node *n;
	int count, strong, weak;

	seq_printf(m, "proc %d\n", proc->pid);
	count = 0;
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			proc->ready_threads, proc->free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, "  nodes: %d\n", count);
	/* aggregate strong/weak counts across all refs this proc holds */
	count = 0;
	strong = 0;
	weak = 0;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->strong;
		weak += ref->weak;
	}
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = 0;
	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, "  buffers: %d\n", count);

	/* only BINDER_WORK_TRANSACTION items count as pending transactions */
	count = 0;
	list_for_each_entry(w, &proc->todo, entry) {
		switch (w->type) {
		case BINDER_WORK_TRANSACTION:
			count++;
			break;
		default:
			break;
		}
	}
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}
3622
3623
/*
 * binder_state_show() - debugfs "state" file: dead nodes plus the full
 * state of every binder process.
 *
 * The global binder lock is taken unless the binder_debug_no_lock switch
 * is set (NOTE(review): presumably a debug aid for dumping state while
 * the lock may be held elsewhere — confirm against the switch's docs).
 */
static int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		binder_lock(__func__);

	seq_puts(m, "binder state:\n");

	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
		print_binder_node(m, node);

	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}
3646
/*
 * binder_stats_show() - debugfs "stats" file: global counters followed by
 * a per-process summary.  Locking mirrors binder_state_show().
 */
static int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		binder_lock(__func__);

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}
3665
/*
 * binder_transactions_show() - debugfs "transactions" file: per-process
 * dump restricted to active transaction state (print_all == 0).
 * Locking mirrors binder_state_show().
 */
static int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		binder_lock(__func__);

	seq_puts(m, "binder transactions:\n");
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}
3681
/*
 * binder_proc_show() - debugfs per-process state file.
 *
 * m->private holds the binder_proc captured when the debugfs file was
 * created.  That proc may have exited and been freed since, so it is
 * only dereferenced after confirming it is still present on the global
 * binder_procs list.
 */
static int binder_proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	struct binder_proc *proc = m->private;
	int do_lock = !binder_debug_no_lock;
	bool valid_proc = false;

	if (do_lock)
		binder_lock(__func__);

	/* guard against a stale proc pointer: look it up before use */
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr == proc) {
			valid_proc = true;
			break;
		}
	}
	if (valid_proc) {
		seq_puts(m, "binder proc state:\n");
		print_binder_proc(m, proc, 1);
	}
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}
3706
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003707static void print_binder_transaction_log_entry(struct seq_file *m,
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003708 struct binder_transaction_log_entry *e)
3709{
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003710 seq_printf(m,
3711 "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
3712 e->debug_id, (e->call_type == 2) ? "reply" :
3713 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
3714 e->from_thread, e->to_proc, e->to_thread, e->to_node,
3715 e->target_handle, e->data_size, e->offsets_size);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003716}
3717
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003718static int binder_transaction_log_show(struct seq_file *m, void *unused)
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003719{
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003720 struct binder_transaction_log *log = m->private;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003721 int i;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003722
3723 if (log->full) {
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003724 for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
3725 print_binder_transaction_log_entry(m, &log->entry[i]);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003726 }
Arve Hjønnevåg5249f482009-04-28 20:57:50 -07003727 for (i = 0; i < log->next; i++)
3728 print_binder_transaction_log_entry(m, &log->entry[i]);
3729 return 0;
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003730}
3731
/* Character-device entry points for /dev/binder. */
static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	/* same handler serves 32-bit compat callers */
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};
3742
/* Misc-device descriptor: registers /dev/binder with a dynamic minor. */
static struct miscdevice binder_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "binder",
	.fops = &binder_fops
};
3748
/*
 * Instantiate the binder_*_fops used by the debugfs files below from the
 * corresponding binder_*_show() functions.  NOTE(review): BINDER_DEBUG_ENTRY
 * is defined earlier in this file — presumably single-open seq_file
 * boilerplate; confirm against the macro definition.
 */
BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);
3753
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003754static int __init binder_init(void)
3755{
3756 int ret;
3757
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07003758 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
3759 if (binder_debugfs_dir_entry_root)
3760 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
3761 binder_debugfs_dir_entry_root);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003762 ret = misc_register(&binder_miscdev);
Arve Hjønnevåg16b66552009-04-28 20:57:50 -07003763 if (binder_debugfs_dir_entry_root) {
3764 debugfs_create_file("state",
3765 S_IRUGO,
3766 binder_debugfs_dir_entry_root,
3767 NULL,
3768 &binder_state_fops);
3769 debugfs_create_file("stats",
3770 S_IRUGO,
3771 binder_debugfs_dir_entry_root,
3772 NULL,
3773 &binder_stats_fops);
3774 debugfs_create_file("transactions",
3775 S_IRUGO,
3776 binder_debugfs_dir_entry_root,
3777 NULL,
3778 &binder_transactions_fops);
3779 debugfs_create_file("transaction_log",
3780 S_IRUGO,
3781 binder_debugfs_dir_entry_root,
3782 &binder_transaction_log,
3783 &binder_transaction_log_fops);
3784 debugfs_create_file("failed_transaction_log",
3785 S_IRUGO,
3786 binder_debugfs_dir_entry_root,
3787 &binder_transaction_log_failed,
3788 &binder_transaction_log_fops);
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003789 }
3790 return ret;
3791}
3792
3793device_initcall(binder_init);
3794
Arve Hjønnevåg975a1ac2012-10-16 15:29:53 -07003795#define CREATE_TRACE_POINTS
3796#include "binder_trace.h"
3797
Greg Kroah-Hartman355b0502011-11-30 20:18:14 +09003798MODULE_LICENSE("GPL v2");