/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
	BINDER_DEBUG_OPEN_CLOSE         = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC       = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3,
};
static uint32_t binder_alloc_debug_mask;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, 0644);

#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info(x); \
	} while (0)

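/*
 * binder_buffer structs are kept on alloc->buffers in ascending
 * data-address order, so each buffer's extent is bounded by its
 * successor on the list.
 */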
static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

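/*
 * A buffer's size is implicit: it extends from its own data pointer to
 * the data pointer of the next buffer on the list, or to the end of
 * the mapped area for the last buffer.
 */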
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return (u8 *)alloc->buffer +
			alloc->buffer_size - (u8 *)buffer->data;
	return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
}

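/*
 * Free buffers are indexed in alloc->free_buffers, an rbtree ordered
 * by size, so binder_alloc_new_buf_locked() can do a best-fit search.
 */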
static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: add free buffer, size %zd, at %pK\n",
			   alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

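/*
 * In-use buffers live in alloc->allocated_buffers, an rbtree keyed by
 * data address, so a buffer can be found again from the user pointer
 * it was handed out as.
 */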
static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer->data < buffer->data)
			p = &parent->rb_left;
		else if (new_buffer->data > buffer->data)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

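/*
 * Translate a user pointer back into a kernel buffer address, look it
 * up in the allocated rbtree, and mark it free_in_progress so a second
 * free of the same buffer is caught and rejected.
 */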
static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc,
		uintptr_t user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	void *kern_ptr;

	kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer->data)
			n = n->rb_left;
		else if (kern_ptr > buffer->data)
			n = n->rb_right;
		else {
			/*
			 * Guard against user threads attempting to
			 * free the buffer twice
			 */
			if (buffer->free_in_progress) {
				pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
				       alloc->pid, current->pid, (u64)user_ptr);
				return NULL;
			}
			buffer->free_in_progress = 1;
			return buffer;
		}
	}
	return NULL;
}

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:	binder_alloc for this proc
 * @user_ptr:	User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return buffer
 * corresponding to that user pointer. Search the rb tree for a buffer
 * that matches the user data pointer.
 *
 * Return: Pointer to buffer or NULL
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   uintptr_t user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

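/**
 * binder_update_page_range() - allocate or free pages in a given range
 * @alloc:	binder_alloc for this proc
 * @allocate:	1 to allocate and map pages, 0 to unmap and free them
 * @start:	start of the range (page-aligned)
 * @end:	end of the range
 * @vma:	vma to map the pages into, or NULL to look up the vma
 *		(and take mmap_sem) via the owning task's mm
 *
 * On allocation, each page in the range is allocated, mapped into the
 * kernel VM area, and inserted into the task's address space. On free
 * (or on allocation failure, via the error labels inside the free
 * loop), the pages are zapped from the vma, unmapped from the kernel,
 * and released.
 */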
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void *start, void *end,
				    struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct page **page;
	struct mm_struct *mm;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: %s pages %pK-%pK\n", alloc->pid,
			   allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(alloc, allocate, start, end);

	if (vma)
		mm = NULL;
	else
		mm = get_task_mm(alloc->tsk);

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = alloc->vma;
		if (vma && mm != alloc->vma_vm_mm) {
			pr_err("%d: vma mm and task mm mismatch\n",
			       alloc->pid);
			vma = NULL;
		}
	}

	if (allocate == 0)
		goto free_range;

	if (vma == NULL) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
		       alloc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;

		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];

		BUG_ON(*page);
		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
		if (*page == NULL) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
			       alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		ret = map_kernel_range_noflush((unsigned long)page_addr,
					       PAGE_SIZE, PAGE_KERNEL, page);
		flush_cache_vmap((unsigned long)page_addr,
				 (unsigned long)page_addr + PAGE_SIZE);
		if (ret != 1) {
			pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
			       alloc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + alloc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0]);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (vma)
			zap_page_range(vma, (uintptr_t)page_addr +
				alloc->user_buffer_offset, PAGE_SIZE);
err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(*page);
		*page = NULL;
err_alloc_page_failed:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}

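/*
 * Best-fit allocator for the mapped area: find the smallest free
 * buffer that can hold the request, back it with pages, and split off
 * any unused tail as a new free buffer. Must be called with
 * alloc->mutex held.
 */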
struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
						  size_t data_size,
						  size_t offsets_size,
						  size_t extra_buffers_size,
						  int is_async)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size, data_offsets_size;
	int ret;

	if (alloc->vma == NULL) {
		pr_err("%d: binder_alloc_buf, no vma\n",
		       alloc->pid);
		return ERR_PTR(-ESRCH);
	}

	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid size %zd-%zd\n",
				   alloc->pid, data_size, offsets_size);
		return ERR_PTR(-EINVAL);
	}
	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
	if (size < data_offsets_size || size < extra_buffers_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid extra_buffers_size %zd\n",
				   alloc->pid, extra_buffers_size);
		return ERR_PTR(-EINVAL);
	}
	if (is_async &&
	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: binder_alloc_buf size %zd failed, no async space left\n",
				   alloc->pid, size);
		return ERR_PTR(-ENOSPC);
	}

	/* Pad 0-size buffers so they get assigned unique addresses */
	size = max(size, sizeof(void *));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		size_t allocated_buffers = 0;
		size_t largest_alloc_size = 0;
		size_t total_alloc_size = 0;
		size_t free_buffers = 0;
		size_t largest_free_size = 0;
		size_t total_free_size = 0;

		for (n = rb_first(&alloc->allocated_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			allocated_buffers++;
			total_alloc_size += buffer_size;
			if (buffer_size > largest_alloc_size)
				largest_alloc_size = buffer_size;
		}
		for (n = rb_first(&alloc->free_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			free_buffers++;
			total_free_size += buffer_size;
			if (buffer_size > largest_free_size)
				largest_free_size = buffer_size;
		}
		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
		       alloc->pid, size);
		pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
		       total_alloc_size, allocated_buffers, largest_alloc_size,
		       total_free_size, free_buffers, largest_free_size);
		return ERR_PTR(-ENOSPC);
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
	}

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
			   alloc->pid, size, buffer, buffer_size);

	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	WARN_ON(n && buffer_size != size);
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	ret = binder_update_page_range(alloc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
	if (ret)
		return ERR_PTR(ret);

	if (buffer_size != size) {
		struct binder_buffer *new_buffer;

		new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!new_buffer) {
			pr_err("%s: %d failed to alloc new buffer struct\n",
			       __func__, alloc->pid);
			goto err_alloc_buf_struct_failed;
		}
		new_buffer->data = (u8 *)buffer->data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
	}

	rb_erase(best_fit, &alloc->free_buffers);
	buffer->free = 0;
	buffer->free_in_progress = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got %pK\n",
			   alloc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	buffer->extra_buffers_size = extra_buffers_size;
	if (is_async) {
		alloc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_alloc_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}
	return buffer;

err_alloc_buf_struct_failed:
	binder_update_page_range(alloc, 0,
				 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
				 end_page_addr, NULL);
	return ERR_PTR(-ENOMEM);
}

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to a
 * pointer-sized boundary).
 *
 * Return: The allocated buffer or %ERR_PTR(-errno) on error
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
					     extra_buffers_size, is_async);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

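/*
 * Page helpers used when deciding whether a free buffer's first page
 * is still shared with a neighboring buffer.
 */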
static void *buffer_start_page(struct binder_buffer *buffer)
{
	return (void *)((uintptr_t)buffer->data & PAGE_MASK);
}

static void *prev_buffer_end_page(struct binder_buffer *buffer)
{
	return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
}

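/*
 * Unlink a free buffer struct from alloc->buffers and kfree() it,
 * releasing the page that holds the start of its data area unless that
 * page is still shared with the previous or next buffer.
 */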
static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	bool to_free = true;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = binder_buffer_prev(buffer);
	BUG_ON(!prev->free);
	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
		to_free = false;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK shares page with %pK\n",
				   alloc->pid, buffer->data, prev->data);
	}

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = binder_buffer_next(buffer);
		if (buffer_start_page(next) == buffer_start_page(buffer)) {
			to_free = false;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%d: merge free, buffer %pK shares page with %pK\n",
					   alloc->pid,
					   buffer->data,
					   next->data);
		}
	}

	if (PAGE_ALIGNED(buffer->data)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer start %pK is page aligned\n",
				   alloc->pid, buffer->data);
		to_free = false;
	}

	if (to_free) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK does not share page with %pK or %pK\n",
				   alloc->pid, buffer->data,
				   prev->data, next ? next->data : NULL);
		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
					 buffer_start_page(buffer) + PAGE_SIZE,
					 NULL);
	}
	list_del(&buffer->entry);
	kfree(buffer);
}

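/*
 * Return a buffer to the free tree: release the whole pages it covers,
 * merge it with any free neighbors, and reinsert the merged result
 * into free_buffers. Must be called with alloc->mutex held.
 */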
static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
			   alloc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON(buffer->data < alloc->buffer);
	BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);

	if (buffer->async_transaction) {
		alloc->free_async_space += size + sizeof(struct binder_buffer);

		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_free_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}

	binder_update_page_range(alloc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = binder_buffer_next(buffer);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = binder_buffer_prev(buffer);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:	binder_alloc for this proc
 * @buffer:	kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc:	alloc structure for this proc
 * @vma:	vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	const char *failure_string;
	struct binder_buffer *buffer;

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	alloc->buffer = area->addr;
	alloc->user_buffer_offset =
		vma->vm_start - (uintptr_t)alloc->buffer;
	mutex_unlock(&binder_alloc_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
	if (cache_is_vipt_aliasing()) {
		while (CACHE_COLOUR(
				(vma->vm_start ^ (uint32_t)alloc->buffer))) {
			pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
				__func__, alloc->pid, vma->vm_start,
				vma->vm_end, alloc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
				   ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
			       GFP_KERNEL);
	if (alloc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	alloc->buffer_size = vma->vm_end - vma->vm_start;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		failure_string = "alloc buffer struct";
		goto err_alloc_buf_struct_failed;
	}

	buffer->data = alloc->buffer;
	INIT_LIST_HEAD(&alloc->buffers);
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;
	barrier();
	alloc->vma = vma;
	alloc->vma_vm_mm = vma->vm_mm;

	return 0;

err_alloc_buf_struct_failed:
	kfree(alloc->pages);
	alloc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_alloc_mmap_lock);
	vfree(alloc->buffer);
	alloc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
	       alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}

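/**
 * binder_alloc_deferred_release() - release all buffers and pages
 * @alloc:	binder_alloc for this proc
 *
 * Frees any buffers that are still allocated (their transactions must
 * already have been freed), drops the remaining free-buffer structs,
 * then releases every resident page, the page array, and the kernel
 * VM area. Called only after the vma has been torn down.
 */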
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;
	struct binder_buffer *buffer;

	BUG_ON(alloc->vma);

	buffers = 0;
	mutex_lock(&alloc->mutex);
	while ((n = rb_first(&alloc->allocated_buffers))) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);

		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}

	while (!list_empty(&alloc->buffers)) {
		buffer = list_first_entry(&alloc->buffers,
					  struct binder_buffer, entry);
		WARN_ON(!buffer->free);

		list_del(&buffer->entry);
		WARN_ON_ONCE(!list_empty(&alloc->buffers));
		kfree(buffer);
	}

	page_count = 0;
	if (alloc->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			void *page_addr;

			if (!alloc->pages[i])
				continue;

			page_addr = alloc->buffer + i * PAGE_SIZE;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%s: %d: page %d at %pK not freed\n",
					   __func__, alloc->pid, i, page_addr);
			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
			__free_page(alloc->pages[i]);
			page_count++;
		}
		kfree(alloc->pages);
		vfree(alloc->buffer);
	}
	mutex_unlock(&alloc->mutex);

	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
			   "%s: %d buffers %d, pages %d\n",
			   __func__, alloc->pid, buffers, page_count);
}

static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->extra_buffers_size,
		   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct rb_node *n;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, "  buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	mutex_unlock(&alloc->mutex);
	return count;
}

/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	WRITE_ONCE(alloc->vma, NULL);
	WRITE_ONCE(alloc->vma_vm_mm, NULL);
}

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	alloc->tsk = current->group_leader;
	alloc->pid = current->group_leader->pid;
	mutex_init(&alloc->mutex);
}