/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2017 Google, Inc.
 */

#ifndef _LINUX_BINDER_ALLOC_H
#define _LINUX_BINDER_ALLOC_H

#include <linux/rbtree.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list_lru.h>
#include <uapi/linux/android/binder.h>

extern struct list_lru binder_alloc_lru;
struct binder_transaction;

/**
 * struct binder_buffer - buffer used for binder transactions
 * @entry:              entry in alloc->buffers
 * @rb_node:            node for allocated_buffers/free_buffers rb trees
 * @free:               %true if buffer is free
 * @clear_on_free:      %true if buffer must be zeroed after use
 * @allow_user_free:    %true if user is allowed to free buffer
 * @async_transaction:  %true if buffer is in use for an async txn
 * @oneway_spam_suspect: %true if the total async allocation size just
 *                      exceeded the spam detection threshold
 * @debug_id:           unique ID for debugging
 * @transaction:        pointer to associated struct binder_transaction
 * @target_node:        struct binder_node associated with this buffer
 * @data_size:          size of @transaction data
 * @offsets_size:       size of array of offsets
 * @extra_buffers_size: size of space for other objects (like sg lists)
 * @user_data:          user pointer to base of buffer space
 * @pid:                pid to attribute the buffer to (caller)
 *
 * Bookkeeping structure for binder transaction buffers.
 */
struct binder_buffer {
	struct list_head entry; /* free and allocated entries by address */
	struct rb_node rb_node; /* free entry by size or allocated entry */
				/* by address */
	unsigned free:1;
	unsigned clear_on_free:1;
	unsigned allow_user_free:1;
	unsigned async_transaction:1;
	unsigned oneway_spam_suspect:1;
	unsigned debug_id:27;

	struct binder_transaction *transaction;

	struct binder_node *target_node;
	size_t data_size;
	size_t offsets_size;
	size_t extra_buffers_size;
	void __user *user_data;
	int pid;
};
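
/*
 * Layout sketch (illustrative, not an API guarantee): the three size
 * fields describe consecutive regions starting at @user_data, in the
 * order payload, offsets array, extra objects (e.g. sg lists).
 * Assuming each region is padded to pointer alignment by the
 * allocator, a buffer's total footprint would be:
 *
 *	size_t total = ALIGN(buffer->data_size, sizeof(void *)) +
 *		       ALIGN(buffer->offsets_size, sizeof(void *)) +
 *		       ALIGN(buffer->extra_buffers_size, sizeof(void *));
 */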

/**
 * struct binder_lru_page - page object used for binder shrinker
 * @page_ptr: pointer to physical page in mmap'd space
 * @lru:      entry in binder_alloc_lru
 * @alloc:    binder_alloc for a proc
 */
struct binder_lru_page {
	struct list_head lru;
	struct page *page_ptr;
	struct binder_alloc *alloc;
};

/**
 * struct binder_alloc - per-binder proc state for binder allocator
 * @mutex:              protects binder_alloc fields
 * @vma:                vm_area_struct passed to mmap_handler
 *                      (invariant after mmap)
 * @vma_vm_mm:          copy of vma->vm_mm (invariant after mmap)
 * @buffer:             base of per-proc address space mapped via mmap
 * @buffers:            list of all buffers for this proc
 * @free_buffers:       rb tree of buffers available for allocation
 *                      sorted by size
 * @allocated_buffers:  rb tree of allocated buffers sorted by address
 * @free_async_space:   VA space available for async buffers. This is
 *                      initialized at mmap time to 1/2 the full VA space
 * @pages:              array of binder_lru_page
 * @buffer_size:        size of address space specified via mmap
 * @pid:                pid for associated binder_proc (invariant after init)
 * @pages_high:         high watermark of offset in @pages
 * @oneway_spam_detected: %true if oneway spam detection has fired; the
 *                      flag is cleared once the async buffer space
 *                      returns to a healthy state
 *
 * Bookkeeping structure for per-proc address space management for binder
 * buffers. It is normally initialized during binder_init() and binder_mmap()
 * calls. The address space is used for both user-visible buffers and for
 * struct binder_buffer objects used to track the user buffers.
 */
struct binder_alloc {
	struct mutex mutex;
	struct vm_area_struct *vma;
	struct mm_struct *vma_vm_mm;
	void __user *buffer;
	struct list_head buffers;
	struct rb_root free_buffers;
	struct rb_root allocated_buffers;
	size_t free_async_space;
	struct binder_lru_page *pages;
	size_t buffer_size;
	uint32_t buffer_free;
	int pid;
	size_t pages_high;
	bool oneway_spam_detected;
};
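
/*
 * Locking sketch: judging from binder_alloc_get_free_async_space()
 * below, fields of struct binder_alloc are assumed to be accessed
 * under @mutex, e.g.:
 *
 *	mutex_lock(&alloc->mutex);
 *	avail = alloc->free_async_space;	// any field access
 *	mutex_unlock(&alloc->mutex);
 */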

#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
void binder_selftest_alloc(struct binder_alloc *alloc);
#else
static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
#endif
enum lru_status binder_alloc_free_page(struct list_head *item,
				       struct list_lru_one *lru,
				       spinlock_t *lock, void *cb_arg);
extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
						  size_t data_size,
						  size_t offsets_size,
						  size_t extra_buffers_size,
						  int is_async,
						  int pid);
extern void binder_alloc_init(struct binder_alloc *alloc);
extern int binder_alloc_shrinker_init(void);
extern void binder_alloc_vma_close(struct binder_alloc *alloc);
extern struct binder_buffer *
binder_alloc_prepare_to_free(struct binder_alloc *alloc,
			     uintptr_t user_ptr);
extern void binder_alloc_free_buf(struct binder_alloc *alloc,
				  struct binder_buffer *buffer);
extern int binder_alloc_mmap_handler(struct binder_alloc *alloc,
				     struct vm_area_struct *vma);
extern void binder_alloc_deferred_release(struct binder_alloc *alloc);
extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
extern void binder_alloc_print_allocated(struct seq_file *m,
					 struct binder_alloc *alloc);
void binder_alloc_print_pages(struct seq_file *m,
			      struct binder_alloc *alloc);
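
/*
 * Typical call sequence (illustrative sketch; error handling and the
 * surrounding binder_proc plumbing are omitted, and failure reporting
 * via ERR_PTR() is an assumption about the allocator):
 *
 *	struct binder_buffer *buf;
 *
 *	buf = binder_alloc_new_buf(alloc, data_size, offsets_size,
 *				   extra_buffers_size, is_async, pid);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	...
 *	binder_alloc_free_buf(alloc, buf);
 */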

/**
 * binder_alloc_get_free_async_space() - get free space available for async
 * @alloc: binder_alloc for this proc
 *
 * Return: the bytes remaining in the address space for async transactions
 */
static inline size_t
binder_alloc_get_free_async_space(struct binder_alloc *alloc)
{
	size_t free_async_space;

	mutex_lock(&alloc->mutex);
	free_async_space = alloc->free_async_space;
	mutex_unlock(&alloc->mutex);
	return free_async_space;
}

unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
				 struct binder_buffer *buffer,
				 binder_size_t buffer_offset,
				 const void __user *from,
				 size_t bytes);

int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t buffer_offset,
				void *src,
				size_t bytes);

int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
				  void *dest,
				  struct binder_buffer *buffer,
				  binder_size_t buffer_offset,
				  size_t bytes);
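
/*
 * Usage sketch (hypothetical caller): transaction data is moved through
 * these helpers rather than by dereferencing buffer->user_data directly.
 * Assuming a nonzero return signals an uncopied remainder/fault:
 *
 *	if (binder_alloc_copy_user_to_buffer(alloc, buf, 0,
 *					     (const void __user *)uptr,
 *					     size))
 *		return -EFAULT;
 */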

#endif /* _LINUX_BINDER_ALLOC_H */