/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "internal.h"

static bool perf_output_space(struct ring_buffer *rb, unsigned long tail,
			      unsigned long offset, unsigned long head)
{
	unsigned long sz = perf_data_size(rb);
	unsigned long mask = sz - 1;

	/*
	 * Check whether the tail is user-writable:
	 *  overwrite : not user-writable, the kernel over-writes its
	 *		own tail, so there is always space
	 *  !overwrite: user-writable, the buffer may drop events instead
	 */
	if (rb->overwrite)
		return true;

	/*
	 * Verify that the payload is not bigger than the buffer,
	 * otherwise the masking logic below may fail to detect
	 * the "not enough space" condition.
	 */
	if ((head - offset) > sz)
		return false;

	offset = (offset - tail) & mask;
	head = (head - tail) & mask;

	if ((int)(head - offset) < 0)
		return false;

	return true;
}
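
/*
 * A worked example of perf_output_space(), with made-up numbers: take
 * sz = 16 (mask = 15), tail = 10, offset = 20.  A 4-byte record gives
 * head = 24: (24 - 10) & 15 = 14, (20 - 10) & 15 = 10, 14 - 10 >= 0,
 * so the record fits in the 6 free bytes between offset and tail + sz.
 * An 8-byte record gives head = 28: (28 - 10) & 15 = 2, 2 - 10 < 0, so
 * it would overwrite unconsumed data and is rejected.  The early size
 * check protects the masking: with tail = offset = 0 and head = 32,
 * both distances mask to 0 and the test would wrongly pass.
 */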

static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->rb->poll, POLL_IN);

	handle->event->pending_wakeup = 1;
	irq_work_queue(&handle->event->pending);
}

/*
 * We need to ensure a later event doesn't publish a head when a former
 * event isn't done writing. However, since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
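 *
 * For example (a hypothetical interleaving, not taken from this file):
 * a writer enters with nest == 1; an NMI lands mid-write, nests to 2,
 * writes its own record and returns without publishing; only when the
 * outer writer drops nest back to 0 is a single head covering both
 * records published, with at most one wakeup for the pair.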
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;

	preempt_disable();
	local_inc(&rb->nest);
	handle->wakeup = local_read(&rb->wakeup);
}

static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long head;

again:
	head = local_read(&rb->head);

	/*
	 * IRQ/NMI can happen here, which means we can miss a head update.
	 */

	if (!local_dec_and_test(&rb->nest))
		goto out;

	/*
	 * Since the mmap() consumer (userspace) can run on a different CPU:
	 *
	 *   kernel				user
	 *
	 *   READ ->data_tail			READ ->data_head
	 *   smp_mb()	(A)			smp_rmb()	(C)
	 *   WRITE $data			READ $data
	 *   smp_wmb()	(B)			smp_mb()	(D)
	 *   STORE ->data_head			WRITE ->data_tail
	 *
	 * Where A pairs with D, and B pairs with C.
	 *
	 * I don't think A needs to be a full barrier because we won't in fact
	 * write data until we see the store from userspace. So we simply don't
	 * issue the data WRITE until we observe it. Be conservative for now.
	 *
	 * OTOH, D needs to be a full barrier since it separates the data READ
	 * from the tail WRITE.
	 *
	 * For B a WMB is sufficient since it separates two WRITEs, and for C
	 * an RMB is sufficient since it separates two READs.
	 *
	 * See perf_output_begin().
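	 *
	 * For illustration, a user-space reader honouring C and D might
	 * look like the sketch below; rmb()/mb() stand in for whatever
	 * barriers the platform actually provides:
	 *
	 *	u64 head = pc->data_head;
	 *	rmb();					(C)
	 *	read $data in (tail, head]
	 *	mb();					(D)
	 *	pc->data_tail = head;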
	 */
	smp_wmb();
	rb->user_page->data_head = head;

	/*
	 * Now check if we missed an update; rely on the (compiler)
	 * barrier in local_dec_and_test() to re-read rb->head.
	 */
	if (unlikely(head != local_read(&rb->head))) {
		local_inc(&rb->nest);
		goto again;
	}

	if (handle->wakeup != local_read(&rb->wakeup))
		perf_output_wakeup(handle);

out:
	preempt_enable();
}

int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event, unsigned int size)
{
	struct ring_buffer *rb;
	unsigned long tail, offset, head;
	int have_lost;
	struct perf_sample_data sample_data;
	struct {
		struct perf_event_header header;
		u64 id;
		u64 lost;
	} lost_event;

	rcu_read_lock();
	/*
	 * For inherited events we send all the output towards the parent.
	 */
	if (event->parent)
		event = event->parent;

	rb = rcu_dereference(event->rb);
	if (!rb)
		goto out;

	handle->rb = rb;
	handle->event = event;

	if (!rb->nr_pages)
		goto out;

	have_lost = local_read(&rb->lost);
	if (have_lost) {
		lost_event.header.size = sizeof(lost_event);
		perf_event_header__init_id(&lost_event.header, &sample_data,
					   event);
		size += lost_event.header.size;
	}

	perf_output_get_handle(handle);

	do {
		/*
		 * Userspace could choose to issue a mb() before updating the
		 * tail pointer, so that all reads are completed before the
		 * write is issued.
		 *
		 * See perf_output_put_handle().
		 */
		tail = ACCESS_ONCE(rb->user_page->data_tail);
		smp_mb();
		offset = head = local_read(&rb->head);
		head += size;
		if (unlikely(!perf_output_space(rb, tail, offset, head)))
			goto fail;
	} while (local_cmpxchg(&rb->head, offset, head) != offset);

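	/*
	 * Advance the wakeup target whenever the head moves more than a
	 * watermark past it; perf_output_put_handle() compares the
	 * snapshot taken in perf_output_get_handle() against this value
	 * to decide whether to wake the consumer.
	 */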
	if (head - local_read(&rb->wakeup) > rb->watermark)
		local_add(rb->watermark, &rb->wakeup);

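	/*
	 * Turn the linear offset into a data-page index plus the bytes
	 * remaining in that page; a "page" here is a whole
	 * (PAGE_SIZE << page_order(rb)) chunk, which in the vmalloc
	 * case is the entire data area.
	 */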
	handle->page = offset >> (PAGE_SHIFT + page_order(rb));
	handle->page &= rb->nr_pages - 1;
	handle->size = offset & ((PAGE_SIZE << page_order(rb)) - 1);
	handle->addr = rb->data_pages[handle->page];
	handle->addr += handle->size;
	handle->size = (PAGE_SIZE << page_order(rb)) - handle->size;

	if (have_lost) {
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.id = event->id;
		lost_event.lost = local_xchg(&rb->lost, 0);

		perf_output_put(handle, lost_event);
		perf_event__output_id_sample(event, handle, &sample_data);
	}

	return 0;

fail:
	local_inc(&rb->lost);
	perf_output_put_handle(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}

unsigned int perf_output_copy(struct perf_output_handle *handle,
			      const void *buf, unsigned int len)
{
	return __output_copy(handle, buf, len);
}

unsigned int perf_output_skip(struct perf_output_handle *handle,
			      unsigned int len)
{
	return __output_skip(handle, NULL, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
	perf_output_put_handle(handle);
	rcu_read_unlock();
}

static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
	long max_size = perf_data_size(rb);

	if (watermark)
		rb->watermark = min(max_size, watermark);

	if (!rb->watermark)
		rb->watermark = max_size / 2;

	if (flags & RING_BUFFER_WRITABLE)
		rb->overwrite = 0;
	else
		rb->overwrite = 1;

	atomic_set(&rb->refcount, 1);

	INIT_LIST_HEAD(&rb->event_list);
	spin_lock_init(&rb->event_lock);
}

#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular, order-0 GFP_KERNEL pages.
 */

struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	/* pgoff 0 is the user page; data pages sit at pgoff 1..nr_pages */
	if (pgoff > rb->nr_pages)
		return NULL;

	if (pgoff == 0)
		return virt_to_page(rb->user_page);

	return virt_to_page(rb->data_pages[pgoff - 1]);
}

static void *perf_mmap_alloc_page(int cpu)
{
	struct page *page;
	int node;

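	/*
	 * cpu == -1 is passed through unchanged: -1 doubles as
	 * NUMA_NO_NODE, for which alloc_pages_node() falls back to the
	 * local node.
	 */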
	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	return page_address(page);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	int i;

	size = sizeof(struct ring_buffer);
	size += nr_pages * sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	rb->user_page = perf_mmap_alloc_page(cpu);
	if (!rb->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
		if (!rb->data_pages[i])
			goto fail_data_pages;
	}

	rb->nr_pages = nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)rb->data_pages[i]);

	free_page((unsigned long)rb->user_page);

fail_user_page:
	kfree(rb);

fail:
	return NULL;
}

static void perf_mmap_free_page(unsigned long addr)
{
	struct page *page = virt_to_page((void *)addr);

	page->mapping = NULL;
	__free_page(page);
}

void rb_free(struct ring_buffer *rb)
{
	int i;

	perf_mmap_free_page((unsigned long)rb->user_page);
	for (i = 0; i < rb->nr_pages; i++)
		perf_mmap_free_page((unsigned long)rb->data_pages[i]);
	kfree(rb);
}

#else
static int data_page_nr(struct ring_buffer *rb)
{
	return rb->nr_pages << page_order(rb);
}

struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	/* The '>' counts in the user page. */
	if (pgoff > data_page_nr(rb))
		return NULL;

	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}

static void perf_mmap_unmark_page(void *addr)
{
	struct page *page = vmalloc_to_page(addr);

	page->mapping = NULL;
}

static void rb_free_work(struct work_struct *work)
{
	struct ring_buffer *rb;
	void *base;
	int i, nr;

	rb = container_of(work, struct ring_buffer, work);
	nr = data_page_nr(rb);

	base = rb->user_page;
	/* The '<=' counts in the user page. */
	for (i = 0; i <= nr; i++)
		perf_mmap_unmark_page(base + (i * PAGE_SIZE));

	vfree(base);
	kfree(rb);
}

void rb_free(struct ring_buffer *rb)
{
	schedule_work(&rb->work);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	void *all_buf;

	size = sizeof(struct ring_buffer);
	size += sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	INIT_WORK(&rb->work, rb_free_work);

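	/*
	 * Allocate the user page plus all data pages as one contiguous
	 * vmalloc area; the data area is then treated as a single
	 * high-order "page" (page_order == ilog2(nr_pages), nr_pages
	 * collapsed to 0 or 1).
	 */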
	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	rb->user_page = all_buf;
	rb->data_pages[0] = all_buf + PAGE_SIZE;
	rb->page_order = ilog2(nr_pages);
	rb->nr_pages = !!nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_all_buf:
	kfree(rb);

fail:
	return NULL;
}

#endif