/*
 * L220/L310 cache controller support
 *
 * Copyright (C) 2016 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/errno.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/hardware/cache-l2x0.h>

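/*
 * Usage sketch (assuming the standard perf userspace tool; the PMU name
 * is chosen in l2x0_pmu_register() below, e.g. "l2c_310" on a PL310):
 *
 *   perf stat -a -e l2c_310/drhit/,l2c_310/drreq/ -- sleep 1
 *
 * Events are system-wide only, and are counted on the CPU advertised in
 * the PMU's "cpumask" sysfs attribute.
 */
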
#define PMU_NR_COUNTERS 2

static void __iomem *l2x0_base;
static struct pmu *l2x0_pmu;
static cpumask_t pmu_cpu;

static const char *l2x0_name;

static ktime_t l2x0_pmu_poll_period;
static struct hrtimer l2x0_pmu_hrtimer;

/*
 * The L220/PL310 PMU has two equivalent counters, Counter1 and Counter0.
 * Registers controlling these are laid out in pairs, in descending order, i.e.
 * the register for Counter1 comes first, followed by the register for
 * Counter0.
 * We ensure that idx 0 -> Counter0, and idx 1 -> Counter1.
 */
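
/*
 * A worked example of the resulting address arithmetic (a sketch, using
 * the L2X0_EVENT_CNT0_* offsets from <asm/hardware/cache-l2x0.h>):
 *
 *   idx 0 config register: l2x0_base + L2X0_EVENT_CNT0_CFG - 4 * 0
 *   idx 1 config register: l2x0_base + L2X0_EVENT_CNT0_CFG - 4 * 1
 *
 * i.e. idx 1 lands on the Counter1 register, 4 bytes below Counter0's,
 * matching the descending layout described above.
 */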
static struct perf_event *events[PMU_NR_COUNTERS];

/* Find an unused counter */
static int l2x0_pmu_find_idx(void)
{
	int i;

	for (i = 0; i < PMU_NR_COUNTERS; i++) {
		if (!events[i])
			return i;
	}

	return -1;
}

/* How many counters are allocated? */
static int l2x0_pmu_num_active_counters(void)
{
	int i, cnt = 0;

	for (i = 0; i < PMU_NR_COUNTERS; i++) {
		if (events[i])
			cnt++;
	}

	return cnt;
}

static void l2x0_pmu_counter_config_write(int idx, u32 val)
{
	writel_relaxed(val, l2x0_base + L2X0_EVENT_CNT0_CFG - 4 * idx);
}

static u32 l2x0_pmu_counter_read(int idx)
{
	return readl_relaxed(l2x0_base + L2X0_EVENT_CNT0_VAL - 4 * idx);
}

static void l2x0_pmu_counter_write(int idx, u32 val)
{
	writel_relaxed(val, l2x0_base + L2X0_EVENT_CNT0_VAL - 4 * idx);
}

static void __l2x0_pmu_enable(void)
{
	u32 val = readl_relaxed(l2x0_base + L2X0_EVENT_CNT_CTRL);
	val |= L2X0_EVENT_CNT_CTRL_ENABLE;
	writel_relaxed(val, l2x0_base + L2X0_EVENT_CNT_CTRL);
}

static void __l2x0_pmu_disable(void)
{
	u32 val = readl_relaxed(l2x0_base + L2X0_EVENT_CNT_CTRL);
	val &= ~L2X0_EVENT_CNT_CTRL_ENABLE;
	writel_relaxed(val, l2x0_base + L2X0_EVENT_CNT_CTRL);
}

static void l2x0_pmu_enable(struct pmu *pmu)
{
	if (l2x0_pmu_num_active_counters() == 0)
		return;

	__l2x0_pmu_enable();
}

static void l2x0_pmu_disable(struct pmu *pmu)
{
	if (l2x0_pmu_num_active_counters() == 0)
		return;

	__l2x0_pmu_disable();
}

static void warn_if_saturated(u32 count)
{
	if (count != 0xffffffff)
		return;

	pr_warn_ratelimited("L2X0 counter saturated. Poll period too long\n");
}

static void l2x0_pmu_event_read(struct perf_event *event)
{
	struct hw_perf_event *hw = &event->hw;
	u64 prev_count, new_count, mask;

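	/*
	 * Snapshot the hardware counter and publish it as the new
	 * prev_count; if another reader updated prev_count while we were
	 * reading the counter, retry so that exactly one delta is
	 * accounted per snapshot.
	 */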
	do {
		prev_count = local64_read(&hw->prev_count);
		new_count = l2x0_pmu_counter_read(hw->idx);
	} while (local64_xchg(&hw->prev_count, new_count) != prev_count);

	mask = GENMASK_ULL(31, 0);
	local64_add((new_count - prev_count) & mask, &event->count);

	warn_if_saturated(new_count);
}

static void l2x0_pmu_event_configure(struct perf_event *event)
{
	struct hw_perf_event *hw = &event->hw;

	/*
	 * The L2X0 counters saturate at 0xffffffff rather than wrapping, so we
	 * will *always* lose some number of events when a counter saturates,
	 * and have no way of detecting how many were lost.
	 *
	 * To minimize the impact of this, we try to maximize the period by
	 * always starting counters at zero. To ensure that group ratios are
	 * representative, we poll periodically to avoid counters saturating.
	 * See l2x0_pmu_poll().
	 */
	local64_set(&hw->prev_count, 0);
	l2x0_pmu_counter_write(hw->idx, 0);
}

static enum hrtimer_restart l2x0_pmu_poll(struct hrtimer *hrtimer)
{
	unsigned long flags;
	int i;

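	/*
	 * Stop the whole PMU while the counters are read and reset, so
	 * that events in the same group are snapshotted at the same
	 * instant and their ratios stay representative.
	 */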
	local_irq_save(flags);
	__l2x0_pmu_disable();

	for (i = 0; i < PMU_NR_COUNTERS; i++) {
		struct perf_event *event = events[i];

		if (!event)
			continue;

		l2x0_pmu_event_read(event);
		l2x0_pmu_event_configure(event);
	}

	__l2x0_pmu_enable();
	local_irq_restore(flags);

	hrtimer_forward_now(hrtimer, l2x0_pmu_poll_period);
	return HRTIMER_RESTART;
}

static void __l2x0_pmu_event_enable(int idx, u32 event)
{
	u32 val;

	val = event << L2X0_EVENT_CNT_CFG_SRC_SHIFT;
	val |= L2X0_EVENT_CNT_CFG_INT_DISABLED;
	l2x0_pmu_counter_config_write(idx, val);
}

static void l2x0_pmu_event_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hw = &event->hw;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (flags & PERF_EF_RELOAD) {
		WARN_ON_ONCE(!(hw->state & PERF_HES_UPTODATE));
		l2x0_pmu_event_configure(event);
	}

	hw->state = 0;

	__l2x0_pmu_event_enable(hw->idx, hw->config_base);
}

static void __l2x0_pmu_event_disable(int idx)
{
	u32 val;

	val = L2X0_EVENT_CNT_CFG_SRC_DISABLED << L2X0_EVENT_CNT_CFG_SRC_SHIFT;
	val |= L2X0_EVENT_CNT_CFG_INT_DISABLED;
	l2x0_pmu_counter_config_write(idx, val);
}

static void l2x0_pmu_event_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hw = &event->hw;

	if (WARN_ON_ONCE(event->hw.state & PERF_HES_STOPPED))
		return;

	__l2x0_pmu_event_disable(hw->idx);

	hw->state |= PERF_HES_STOPPED;

	if (flags & PERF_EF_UPDATE) {
		l2x0_pmu_event_read(event);
		hw->state |= PERF_HES_UPTODATE;
	}
}

static int l2x0_pmu_event_add(struct perf_event *event, int flags)
{
	struct hw_perf_event *hw = &event->hw;
	int idx = l2x0_pmu_find_idx();

	if (idx == -1)
		return -EAGAIN;

	/*
	 * Pin the timer, so that the overflows are handled by the chosen
	 * event->cpu (this is the same one as presented in "cpumask"
	 * attribute).
	 */
	if (l2x0_pmu_num_active_counters() == 0)
		hrtimer_start(&l2x0_pmu_hrtimer, l2x0_pmu_poll_period,
			      HRTIMER_MODE_REL_PINNED);

	events[idx] = event;
	hw->idx = idx;

	l2x0_pmu_event_configure(event);

	hw->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;

	if (flags & PERF_EF_START)
		l2x0_pmu_event_start(event, 0);

	return 0;
}

static void l2x0_pmu_event_del(struct perf_event *event, int flags)
{
	struct hw_perf_event *hw = &event->hw;

	l2x0_pmu_event_stop(event, PERF_EF_UPDATE);

	events[hw->idx] = NULL;
	hw->idx = -1;

	if (l2x0_pmu_num_active_counters() == 0)
		hrtimer_cancel(&l2x0_pmu_hrtimer);
}

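/*
 * A group is only schedulable if all of its non-software events are ours
 * and they fit in the two hardware counters; software events can always
 * be scheduled alongside.
 */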
static bool l2x0_pmu_group_is_valid(struct perf_event *event)
{
	struct pmu *pmu = event->pmu;
	struct perf_event *leader = event->group_leader;
	struct perf_event *sibling;
	int num_hw = 0;

	if (leader->pmu == pmu)
		num_hw++;
	else if (!is_software_event(leader))
		return false;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (sibling->pmu == pmu)
			num_hw++;
		else if (!is_software_event(sibling))
			return false;
	}

	return num_hw <= PMU_NR_COUNTERS;
}

static int l2x0_pmu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hw = &event->hw;

	if (event->attr.type != l2x0_pmu->type)
		return -ENOENT;

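	/*
	 * This is an uncore PMU and the counters' overflow interrupt is
	 * left disabled (see __l2x0_pmu_event_enable()), so neither
	 * sampling nor per-task profiling can be supported.
	 */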
	if (is_sampling_event(event) ||
	    event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	if (event->attr.exclude_user ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv ||
	    event->attr.exclude_idle ||
	    event->attr.exclude_host ||
	    event->attr.exclude_guest)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	if (event->attr.config & ~L2X0_EVENT_CNT_CFG_SRC_MASK)
		return -EINVAL;

	hw->config_base = event->attr.config;

	if (!l2x0_pmu_group_is_valid(event))
		return -EINVAL;

	event->cpu = cpumask_first(&pmu_cpu);

	return 0;
}

struct l2x0_event_attribute {
	struct device_attribute attr;
	unsigned int config;
	bool pl310_only;
};

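/*
 * Each event attribute is wrapped in a static l2x0_event_attribute via an
 * anonymous compound literal, so that l2x0_pmu_event_show() and
 * l2x0_pmu_event_attr_is_visible() can recover the config value and the
 * pl310_only flag with container_of().
 */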
#define L2X0_EVENT_ATTR(_name, _config, _pl310_only) \
	(&((struct l2x0_event_attribute[]) {{ \
		.attr = __ATTR(_name, S_IRUGO, l2x0_pmu_event_show, NULL), \
		.config = _config, \
		.pl310_only = _pl310_only, \
	}})[0].attr.attr)

#define L220_PLUS_EVENT_ATTR(_name, _config) \
	L2X0_EVENT_ATTR(_name, _config, false)

#define PL310_EVENT_ATTR(_name, _config) \
	L2X0_EVENT_ATTR(_name, _config, true)

static ssize_t l2x0_pmu_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct l2x0_event_attribute *lattr;

	lattr = container_of(attr, typeof(*lattr), attr);
	return snprintf(buf, PAGE_SIZE, "config=0x%x\n", lattr->config);
}

static umode_t l2x0_pmu_event_attr_is_visible(struct kobject *kobj,
					      struct attribute *attr,
					      int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pmu *pmu = dev_get_drvdata(dev);
	struct l2x0_event_attribute *lattr;

	lattr = container_of(attr, typeof(*lattr), attr.attr);

	if (!lattr->pl310_only || strcmp("l2c_310", pmu->name) == 0)
		return attr->mode;

	return 0;
}

static struct attribute *l2x0_pmu_event_attrs[] = {
	L220_PLUS_EVENT_ATTR(co, 0x1),
	L220_PLUS_EVENT_ATTR(drhit, 0x2),
	L220_PLUS_EVENT_ATTR(drreq, 0x3),
	L220_PLUS_EVENT_ATTR(dwhit, 0x4),
	L220_PLUS_EVENT_ATTR(dwreq, 0x5),
	L220_PLUS_EVENT_ATTR(dwtreq, 0x6),
	L220_PLUS_EVENT_ATTR(irhit, 0x7),
	L220_PLUS_EVENT_ATTR(irreq, 0x8),
	L220_PLUS_EVENT_ATTR(wa, 0x9),
	PL310_EVENT_ATTR(ipfalloc, 0xa),
	PL310_EVENT_ATTR(epfhit, 0xb),
	PL310_EVENT_ATTR(epfalloc, 0xc),
	PL310_EVENT_ATTR(srrcvd, 0xd),
	PL310_EVENT_ATTR(srconf, 0xe),
	PL310_EVENT_ATTR(epfrcvd, 0xf),
	NULL
};

static struct attribute_group l2x0_pmu_event_attrs_group = {
	.name = "events",
	.attrs = l2x0_pmu_event_attrs,
	.is_visible = l2x0_pmu_event_attr_is_visible,
};

static ssize_t l2x0_pmu_cpumask_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &pmu_cpu);
}

static struct device_attribute l2x0_pmu_cpumask_attr =
		__ATTR(cpumask, S_IRUGO, l2x0_pmu_cpumask_show, NULL);

static struct attribute *l2x0_pmu_cpumask_attrs[] = {
	&l2x0_pmu_cpumask_attr.attr,
	NULL,
};

static struct attribute_group l2x0_pmu_cpumask_attr_group = {
	.attrs = l2x0_pmu_cpumask_attrs,
};

static const struct attribute_group *l2x0_pmu_attr_groups[] = {
	&l2x0_pmu_event_attrs_group,
	&l2x0_pmu_cpumask_attr_group,
	NULL,
};

static void l2x0_pmu_reset(void)
{
	int i;

	__l2x0_pmu_disable();

	for (i = 0; i < PMU_NR_COUNTERS; i++)
		__l2x0_pmu_event_disable(i);
}

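/*
 * If the CPU backing the "cpumask" attribute goes offline, hand the PMU
 * context over to any other online CPU and update the mask accordingly.
 */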
static int l2x0_pmu_offline_cpu(unsigned int cpu)
{
	unsigned int target;

	if (!cpumask_test_and_clear_cpu(cpu, &pmu_cpu))
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(l2x0_pmu, cpu, target);
	cpumask_set_cpu(target, &pmu_cpu);

	return 0;
}

void l2x0_pmu_suspend(void)
{
	int i;

	if (!l2x0_pmu)
		return;

	l2x0_pmu_disable(l2x0_pmu);

	for (i = 0; i < PMU_NR_COUNTERS; i++) {
		if (events[i])
			l2x0_pmu_event_stop(events[i], PERF_EF_UPDATE);
	}
}

void l2x0_pmu_resume(void)
{
	int i;

	if (!l2x0_pmu)
		return;

	l2x0_pmu_reset();

	for (i = 0; i < PMU_NR_COUNTERS; i++) {
		if (events[i])
			l2x0_pmu_event_start(events[i], PERF_EF_RELOAD);
	}

	l2x0_pmu_enable(l2x0_pmu);
}

void __init l2x0_pmu_register(void __iomem *base, u32 part)
{
	/*
	 * Determine whether we support the PMU, and choose the name for sysfs.
	 * This is also used by l2x0_pmu_event_attr_is_visible to determine
	 * which events to display, as the PL310 PMU supports a superset of
	 * L220 events.
	 *
	 * The L210 PMU has a different programmer's interface, and is not
	 * supported by this driver.
	 *
	 * We must defer registering the PMU until the perf subsystem is up and
	 * running, so just stash the name and base, and leave that to another
	 * initcall.
	 */
	switch (part & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L220:
		l2x0_name = "l2c_220";
		break;
	case L2X0_CACHE_ID_PART_L310:
		l2x0_name = "l2c_310";
		break;
	default:
		return;
	}

	l2x0_base = base;
}

static __init int l2x0_pmu_init(void)
{
	int ret;

	if (!l2x0_base)
		return 0;

	l2x0_pmu = kzalloc(sizeof(*l2x0_pmu), GFP_KERNEL);
	if (!l2x0_pmu) {
		pr_warn("Unable to allocate L2x0 PMU\n");
		return -ENOMEM;
	}

	*l2x0_pmu = (struct pmu) {
		.task_ctx_nr = perf_invalid_context,
		.pmu_enable = l2x0_pmu_enable,
		.pmu_disable = l2x0_pmu_disable,
		.read = l2x0_pmu_event_read,
		.start = l2x0_pmu_event_start,
		.stop = l2x0_pmu_event_stop,
		.add = l2x0_pmu_event_add,
		.del = l2x0_pmu_event_del,
		.event_init = l2x0_pmu_event_init,
		.attr_groups = l2x0_pmu_attr_groups,
	};

	l2x0_pmu_reset();

	/*
	 * We always use a hrtimer rather than an interrupt.
	 * See comments in l2x0_pmu_event_configure and l2x0_pmu_poll.
	 *
	 * Polling once a second allows the counters to fill up to 1/128th on a
	 * quad-core test chip with cores clocked at 400MHz. Hopefully this
	 * leaves sufficient headroom to avoid overflow on production silicon
	 * at higher frequencies.
	 */
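	/*
	 * As a rough check of that figure: 1/128th of a 32-bit counter is
	 * 2^32 / 128 (about 33.5M events) per second, so at that rate a
	 * counter would take ~128 seconds to saturate, and a 1 second
	 * poll leaves roughly two orders of magnitude of headroom.
	 */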
	l2x0_pmu_poll_period = ms_to_ktime(1000);
	hrtimer_init(&l2x0_pmu_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	l2x0_pmu_hrtimer.function = l2x0_pmu_poll;

	cpumask_set_cpu(0, &pmu_cpu);
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_L2X0_ONLINE,
					"AP_PERF_ARM_L2X0_ONLINE", NULL,
					l2x0_pmu_offline_cpu);
	if (ret)
		goto out_pmu;

	ret = perf_pmu_register(l2x0_pmu, l2x0_name, -1);
	if (ret)
		goto out_cpuhp;

	return 0;

out_cpuhp:
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_ARM_L2X0_ONLINE);
out_pmu:
	kfree(l2x0_pmu);
	l2x0_pmu = NULL;
	return ret;
}
device_initcall(l2x0_pmu_init);