// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARMv8 PMUv3 Performance Events handling code.
 *
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This code is based heavily on the ARMv7 perf event code.
 */

#include <asm/irq_regs.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

#include <clocksource/arm_arch_timer.h>

#include <linux/acpi.h>
#include <linux/clocksource.h>
#include <linux/kvm_host.h>
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/sched_clock.h>
#include <linux/smp.h>

/* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREF_LINEFILL			0xC2

/* ARMv8 Cavium ThunderX specific event types. */
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST		0xE9
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS	0xEA
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS	0xEB
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS	0xEC
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS	0xED

/*
 * ARMv8 architecturally defined events. Not all of these may be
 * supported on any given implementation; unsupported events will
 * be disabled at run-time based on the PMCEID registers.
 */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INST_RETIRED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV8_PMUV3_PERFCTR_STALL_FRONTEND,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV8_PMUV3_PERFCTR_STALL_BACKEND,
};

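/* C(x) expands to PERF_COUNT_HW_CACHE_##x (defined in <linux/perf/arm_pmu.h>). */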
static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1I_CACHE,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL,
	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1D_TLB,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL,
	[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1I_TLB,

	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD,
	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_LL_CACHE_RD,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_BR_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
};

static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREF_LINEFILL,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};

static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};

static const unsigned armv8_a73_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
};

static const unsigned armv8_thunder_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						  [PERF_COUNT_HW_CACHE_OP_MAX]
						  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS,

	[C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS,
	[C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS,

	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
};

static const unsigned armv8_vulcan_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						 [PERF_COUNT_HW_CACHE_OP_MAX]
						 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,

	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};

static ssize_t
armv8pmu_events_sysfs_show(struct device *dev,
			   struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

	return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
}

#define ARMV8_EVENT_ATTR(name, config)						\
	(&((struct perf_pmu_events_attr) {					\
		.attr = __ATTR(name, 0444, armv8pmu_events_sysfs_show, NULL),	\
		.id = config,							\
	}).attr.attr)

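/*
 * Each attribute below appears as a file under
 * /sys/bus/event_source/devices/<pmu>/events/; reading it returns a
 * string such as "event=0x0011" (e.g. for cpu_cycles) which the perf
 * tool parses against the "format" attributes defined further down.
 */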
static struct attribute *armv8_pmuv3_event_attrs[] = {
	ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR),
	ARMV8_EVENT_ATTR(l1i_cache_refill, ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l1i_tlb_refill, ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL),
	ARMV8_EVENT_ATTR(l1d_cache_refill, ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l1d_cache, ARMV8_PMUV3_PERFCTR_L1D_CACHE),
	ARMV8_EVENT_ATTR(l1d_tlb_refill, ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL),
	ARMV8_EVENT_ATTR(ld_retired, ARMV8_PMUV3_PERFCTR_LD_RETIRED),
	ARMV8_EVENT_ATTR(st_retired, ARMV8_PMUV3_PERFCTR_ST_RETIRED),
	ARMV8_EVENT_ATTR(inst_retired, ARMV8_PMUV3_PERFCTR_INST_RETIRED),
	ARMV8_EVENT_ATTR(exc_taken, ARMV8_PMUV3_PERFCTR_EXC_TAKEN),
	ARMV8_EVENT_ATTR(exc_return, ARMV8_PMUV3_PERFCTR_EXC_RETURN),
	ARMV8_EVENT_ATTR(cid_write_retired, ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED),
	ARMV8_EVENT_ATTR(pc_write_retired, ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED),
	ARMV8_EVENT_ATTR(br_immed_retired, ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED),
	ARMV8_EVENT_ATTR(br_return_retired, ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED),
	ARMV8_EVENT_ATTR(unaligned_ldst_retired, ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED),
	ARMV8_EVENT_ATTR(br_mis_pred, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED),
	ARMV8_EVENT_ATTR(cpu_cycles, ARMV8_PMUV3_PERFCTR_CPU_CYCLES),
	ARMV8_EVENT_ATTR(br_pred, ARMV8_PMUV3_PERFCTR_BR_PRED),
	ARMV8_EVENT_ATTR(mem_access, ARMV8_PMUV3_PERFCTR_MEM_ACCESS),
	ARMV8_EVENT_ATTR(l1i_cache, ARMV8_PMUV3_PERFCTR_L1I_CACHE),
	ARMV8_EVENT_ATTR(l1d_cache_wb, ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB),
	ARMV8_EVENT_ATTR(l2d_cache, ARMV8_PMUV3_PERFCTR_L2D_CACHE),
	ARMV8_EVENT_ATTR(l2d_cache_refill, ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l2d_cache_wb, ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB),
	ARMV8_EVENT_ATTR(bus_access, ARMV8_PMUV3_PERFCTR_BUS_ACCESS),
	ARMV8_EVENT_ATTR(memory_error, ARMV8_PMUV3_PERFCTR_MEMORY_ERROR),
	ARMV8_EVENT_ATTR(inst_spec, ARMV8_PMUV3_PERFCTR_INST_SPEC),
	ARMV8_EVENT_ATTR(ttbr_write_retired, ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED),
	ARMV8_EVENT_ATTR(bus_cycles, ARMV8_PMUV3_PERFCTR_BUS_CYCLES),
	/* Don't expose the chain event in /sys, since it's useless in isolation */
	ARMV8_EVENT_ATTR(l1d_cache_allocate, ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE),
	ARMV8_EVENT_ATTR(l2d_cache_allocate, ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE),
	ARMV8_EVENT_ATTR(br_retired, ARMV8_PMUV3_PERFCTR_BR_RETIRED),
	ARMV8_EVENT_ATTR(br_mis_pred_retired, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED),
	ARMV8_EVENT_ATTR(stall_frontend, ARMV8_PMUV3_PERFCTR_STALL_FRONTEND),
	ARMV8_EVENT_ATTR(stall_backend, ARMV8_PMUV3_PERFCTR_STALL_BACKEND),
	ARMV8_EVENT_ATTR(l1d_tlb, ARMV8_PMUV3_PERFCTR_L1D_TLB),
	ARMV8_EVENT_ATTR(l1i_tlb, ARMV8_PMUV3_PERFCTR_L1I_TLB),
	ARMV8_EVENT_ATTR(l2i_cache, ARMV8_PMUV3_PERFCTR_L2I_CACHE),
	ARMV8_EVENT_ATTR(l2i_cache_refill, ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l3d_cache_allocate, ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE),
	ARMV8_EVENT_ATTR(l3d_cache_refill, ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l3d_cache, ARMV8_PMUV3_PERFCTR_L3D_CACHE),
	ARMV8_EVENT_ATTR(l3d_cache_wb, ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB),
	ARMV8_EVENT_ATTR(l2d_tlb_refill, ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL),
	ARMV8_EVENT_ATTR(l2i_tlb_refill, ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL),
	ARMV8_EVENT_ATTR(l2d_tlb, ARMV8_PMUV3_PERFCTR_L2D_TLB),
	ARMV8_EVENT_ATTR(l2i_tlb, ARMV8_PMUV3_PERFCTR_L2I_TLB),
	ARMV8_EVENT_ATTR(remote_access, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS),
	ARMV8_EVENT_ATTR(ll_cache, ARMV8_PMUV3_PERFCTR_LL_CACHE),
	ARMV8_EVENT_ATTR(ll_cache_miss, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS),
	ARMV8_EVENT_ATTR(dtlb_walk, ARMV8_PMUV3_PERFCTR_DTLB_WALK),
	ARMV8_EVENT_ATTR(itlb_walk, ARMV8_PMUV3_PERFCTR_ITLB_WALK),
	ARMV8_EVENT_ATTR(ll_cache_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_RD),
	ARMV8_EVENT_ATTR(ll_cache_miss_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD),
	ARMV8_EVENT_ATTR(remote_access_rd, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD),
	ARMV8_EVENT_ATTR(l1d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L1D_CACHE_LMISS_RD),
	ARMV8_EVENT_ATTR(op_retired, ARMV8_PMUV3_PERFCTR_OP_RETIRED),
	ARMV8_EVENT_ATTR(op_spec, ARMV8_PMUV3_PERFCTR_OP_SPEC),
	ARMV8_EVENT_ATTR(stall, ARMV8_PMUV3_PERFCTR_STALL),
	ARMV8_EVENT_ATTR(stall_slot_backend, ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND),
	ARMV8_EVENT_ATTR(stall_slot_frontend, ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND),
	ARMV8_EVENT_ATTR(stall_slot, ARMV8_PMUV3_PERFCTR_STALL_SLOT),
	ARMV8_EVENT_ATTR(sample_pop, ARMV8_SPE_PERFCTR_SAMPLE_POP),
	ARMV8_EVENT_ATTR(sample_feed, ARMV8_SPE_PERFCTR_SAMPLE_FEED),
	ARMV8_EVENT_ATTR(sample_filtrate, ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE),
	ARMV8_EVENT_ATTR(sample_collision, ARMV8_SPE_PERFCTR_SAMPLE_COLLISION),
	ARMV8_EVENT_ATTR(cnt_cycles, ARMV8_AMU_PERFCTR_CNT_CYCLES),
	ARMV8_EVENT_ATTR(stall_backend_mem, ARMV8_AMU_PERFCTR_STALL_BACKEND_MEM),
	ARMV8_EVENT_ATTR(l1i_cache_lmiss, ARMV8_PMUV3_PERFCTR_L1I_CACHE_LMISS),
	ARMV8_EVENT_ATTR(l2d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L2D_CACHE_LMISS_RD),
	ARMV8_EVENT_ATTR(l2i_cache_lmiss, ARMV8_PMUV3_PERFCTR_L2I_CACHE_LMISS),
	ARMV8_EVENT_ATTR(l3d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L3D_CACHE_LMISS_RD),
	ARMV8_EVENT_ATTR(ldst_align_lat, ARMV8_PMUV3_PERFCTR_LDST_ALIGN_LAT),
	ARMV8_EVENT_ATTR(ld_align_lat, ARMV8_PMUV3_PERFCTR_LD_ALIGN_LAT),
	ARMV8_EVENT_ATTR(st_align_lat, ARMV8_PMUV3_PERFCTR_ST_ALIGN_LAT),
	ARMV8_EVENT_ATTR(mem_access_checked, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED),
	ARMV8_EVENT_ATTR(mem_access_checked_rd, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_RD),
	ARMV8_EVENT_ATTR(mem_access_checked_wr, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_WR),
	NULL,
};

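/*
 * Only advertise an event in sysfs if the hardware actually implements
 * it: common event IDs are looked up in the PMCEID bitmap read at probe
 * time, and IDs in the extended-common range are checked against the
 * extension bitmap.
 */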
static umode_t
armv8pmu_event_attr_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pmu *pmu = dev_get_drvdata(dev);
	struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);

	if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
	    test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap))
		return attr->mode;

	if (pmu_attr->id >= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE) {
		u64 id = pmu_attr->id - ARMV8_PMUV3_EXT_COMMON_EVENT_BASE;

		if (id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
		    test_bit(id, cpu_pmu->pmceid_ext_bitmap))
			return attr->mode;
	}

	return 0;
}

static struct attribute_group armv8_pmuv3_events_attr_group = {
	.name = "events",
	.attrs = armv8_pmuv3_event_attrs,
	.is_visible = armv8pmu_event_attr_is_visible,
};

PMU_FORMAT_ATTR(event, "config:0-15");
PMU_FORMAT_ATTR(long, "config1:0");

static inline bool armv8pmu_event_is_64bit(struct perf_event *event)
{
	return event->attr.config1 & 0x1;
}
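
/*
 * Userspace selects a long (64-bit) counter via config1 bit 0, e.g.
 * "perf stat -e armv8_pmuv3/event=0x11,long/" (the PMU name and event
 * number here are illustrative and vary by platform).
 */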

static struct attribute *armv8_pmuv3_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_long.attr,
	NULL,
};

static struct attribute_group armv8_pmuv3_format_attr_group = {
	.name = "format",
	.attrs = armv8_pmuv3_format_attrs,
};

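/*
 * PMMIR_EL1 describes implementation-defined PMU properties; the SLOTS
 * field (cached in reg_pmmir at probe time) bounds how much the
 * STALL_SLOT event can increment per cycle, so userspace can use it to
 * normalise stall_slot counts.
 */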
static ssize_t slots_show(struct device *dev, struct device_attribute *attr,
			  char *page)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
	u32 slots = cpu_pmu->reg_pmmir & ARMV8_PMU_SLOTS_MASK;

	return snprintf(page, PAGE_SIZE, "0x%08x\n", slots);
}

static DEVICE_ATTR_RO(slots);

static struct attribute *armv8_pmuv3_caps_attrs[] = {
	&dev_attr_slots.attr,
	NULL,
};

static struct attribute_group armv8_pmuv3_caps_attr_group = {
	.name = "caps",
	.attrs = armv8_pmuv3_caps_attrs,
};

/*
 * Perf Events' indices
 */
#define	ARMV8_IDX_CYCLE_COUNTER	0
#define	ARMV8_IDX_COUNTER0	1
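
/*
 * Index 0 is the dedicated cycle counter (PMCCNTR_EL0); indices from
 * ARMV8_IDX_COUNTER0 upwards map onto the programmable event counters
 * (PMEVCNTR<n>_EL0).
 */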

/*
 * We unconditionally enable ARMv8.5-PMU long event counter support
 * (64-bit events) where supported. Indicate if this arm_pmu has long
 * event counter support.
 */
static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu)
{
	return (cpu_pmu->pmuver >= ID_AA64DFR0_PMUVER_8_5);
}

/*
 * We must chain two programmable counters for 64-bit events,
 * except when we have allocated the 64-bit cycle counter (for CPU
 * cycles event). This must be called only when the event has
 * a counter allocated.
 */
static inline bool armv8pmu_event_is_chained(struct perf_event *event)
{
	int idx = event->hw.idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);

	return !WARN_ON(idx < 0) &&
	       armv8pmu_event_is_64bit(event) &&
	       !armv8pmu_has_long_event(cpu_pmu) &&
	       (idx != ARMV8_IDX_CYCLE_COUNTER);
}

/*
 * ARMv8 low level PMU access
 */

/*
 * Perf Event to low level counters mapping
 */
#define	ARMV8_IDX_TO_COUNTER(x)	\
	(((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)

static inline u32 armv8pmu_pmcr_read(void)
{
	return read_sysreg(pmcr_el0);
}

static inline void armv8pmu_pmcr_write(u32 val)
{
	val &= ARMV8_PMU_PMCR_MASK;
	isb();
	write_sysreg(val, pmcr_el0);
}

static inline int armv8pmu_has_overflowed(u32 pmovsr)
{
	return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
}

static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
	return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}

static inline void armv8pmu_select_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	write_sysreg(counter, pmselr_el0);
	isb();
}

static inline u64 armv8pmu_read_evcntr(int idx)
{
	armv8pmu_select_counter(idx);
	return read_sysreg(pmxevcntr_el0);
}

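/*
 * For a chained pair, the higher-indexed counter holds the upper 32
 * bits of the event count and the counter directly below it the lower
 * 32 bits.
 */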
static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
{
	int idx = event->hw.idx;
	u64 val = 0;

	val = armv8pmu_read_evcntr(idx);
	if (armv8pmu_event_is_chained(event))
		val = (val << 32) | armv8pmu_read_evcntr(idx - 1);
	return val;
}

/*
 * The cycle counter is always a 64-bit counter. When ARMV8_PMU_PMCR_LP
 * is set, the event counters also become 64-bit counters. Unless the
 * user has requested a long counter (attr.config1) then we want to
 * interrupt upon 32-bit overflow - we achieve this by applying a bias.
 */
static bool armv8pmu_event_needs_bias(struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (armv8pmu_event_is_64bit(event))
		return false;

	if (armv8pmu_has_long_event(cpu_pmu) ||
	    idx == ARMV8_IDX_CYCLE_COUNTER)
		return true;

	return false;
}

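/*
 * e.g. with the bias applied, programming a nominal start value of 0
 * actually writes 0xffffffff00000000: the 64-bit hardware counter then
 * overflows (and raises its interrupt) after 2^32 increments, exactly
 * as a 32-bit counter would.
 */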
static u64 armv8pmu_bias_long_counter(struct perf_event *event, u64 value)
{
	if (armv8pmu_event_needs_bias(event))
		value |= GENMASK(63, 32);

	return value;
}

static u64 armv8pmu_unbias_long_counter(struct perf_event *event, u64 value)
{
	if (armv8pmu_event_needs_bias(event))
		value &= ~GENMASK(63, 32);

	return value;
}

static u64 armv8pmu_read_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u64 value = 0;

	if (idx == ARMV8_IDX_CYCLE_COUNTER)
		value = read_sysreg(pmccntr_el0);
	else
		value = armv8pmu_read_hw_counter(event);

	return armv8pmu_unbias_long_counter(event, value);
}

static inline void armv8pmu_write_evcntr(int idx, u64 value)
{
	armv8pmu_select_counter(idx);
	write_sysreg(value, pmxevcntr_el0);
}

static inline void armv8pmu_write_hw_counter(struct perf_event *event,
					     u64 value)
{
	int idx = event->hw.idx;

	if (armv8pmu_event_is_chained(event)) {
		armv8pmu_write_evcntr(idx, upper_32_bits(value));
		armv8pmu_write_evcntr(idx - 1, lower_32_bits(value));
	} else {
		armv8pmu_write_evcntr(idx, value);
	}
}

static void armv8pmu_write_counter(struct perf_event *event, u64 value)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	value = armv8pmu_bias_long_counter(event, value);

	if (idx == ARMV8_IDX_CYCLE_COUNTER)
		write_sysreg(value, pmccntr_el0);
	else
		armv8pmu_write_hw_counter(event, value);
}

static inline void armv8pmu_write_evtype(int idx, u32 val)
{
	armv8pmu_select_counter(idx);
	val &= ARMV8_PMU_EVTYPE_MASK;
	write_sysreg(val, pmxevtyper_el0);
}

static inline void armv8pmu_write_event_type(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	/*
	 * For chained events, the low counter is programmed to count
	 * the event of interest and the high counter is programmed
	 * with the CHAIN event code, with filters set to count at all ELs.
	 */
	if (armv8pmu_event_is_chained(event)) {
		u32 chain_evt = ARMV8_PMUV3_PERFCTR_CHAIN |
				ARMV8_PMU_INCLUDE_EL2;

		armv8pmu_write_evtype(idx - 1, hwc->config_base);
		armv8pmu_write_evtype(idx, chain_evt);
	} else {
		armv8pmu_write_evtype(idx, hwc->config_base);
	}
}

static u32 armv8pmu_event_cnten_mask(struct perf_event *event)
{
	int counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
	u32 mask = BIT(counter);

	if (armv8pmu_event_is_chained(event))
		mask |= BIT(counter - 1);
	return mask;
}

static inline void armv8pmu_enable_counter(u32 mask)
{
	write_sysreg(mask, pmcntenset_el0);
}

static inline void armv8pmu_enable_event_counter(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	u32 mask = armv8pmu_event_cnten_mask(event);

	kvm_set_pmu_events(mask, attr);

	/* We rely on the hypervisor switch code to enable guest counters */
	if (!kvm_pmu_counter_deferred(attr))
		armv8pmu_enable_counter(mask);
}

static inline void armv8pmu_disable_counter(u32 mask)
{
	write_sysreg(mask, pmcntenclr_el0);
}

static inline void armv8pmu_disable_event_counter(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	u32 mask = armv8pmu_event_cnten_mask(event);

	kvm_clr_pmu_events(mask);

	/* We rely on the hypervisor switch code to disable guest counters */
	if (!kvm_pmu_counter_deferred(attr))
		armv8pmu_disable_counter(mask);
}

static inline void armv8pmu_enable_intens(u32 mask)
{
	write_sysreg(mask, pmintenset_el1);
}

static inline void armv8pmu_enable_event_irq(struct perf_event *event)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
	armv8pmu_enable_intens(BIT(counter));
}

static inline void armv8pmu_disable_intens(u32 mask)
{
	write_sysreg(mask, pmintenclr_el1);
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	write_sysreg(mask, pmovsclr_el0);
	isb();
}

static inline void armv8pmu_disable_event_irq(struct perf_event *event)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
	armv8pmu_disable_intens(BIT(counter));
}

static inline u32 armv8pmu_getreset_flags(void)
{
	u32 value;

	/* Read */
	value = read_sysreg(pmovsclr_el0);

	/* Write to clear flags */
	value &= ARMV8_PMU_OVSR_MASK;
	write_sysreg(value, pmovsclr_el0);

	return value;
}

static void armv8pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv8pmu_disable_event_counter(event);

	/*
	 * Set event (if destined for PMNx counters).
	 */
	armv8pmu_write_event_type(event);

	/*
	 * Enable interrupt for this counter
	 */
	armv8pmu_enable_event_irq(event);

	/*
	 * Enable counter
	 */
	armv8pmu_enable_event_counter(event);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv8pmu_disable_event_counter(event);

	/*
	 * Disable interrupt for this counter
	 */
	armv8pmu_disable_event_irq(event);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Enable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Disable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
	u32 pmovsr;
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmovsr = armv8pmu_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv8pmu_has_overflowed(pmovsr))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	/*
	 * Stop the PMU while processing the counter overflows
	 * to prevent skews in group events.
	 */
	armv8pmu_stop(cpu_pmu);
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}
	armv8pmu_start(cpu_pmu);

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}

static int armv8pmu_get_single_idx(struct pmu_hw_events *cpuc,
				   struct arm_pmu *cpu_pmu)
{
	int idx;

	for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx++) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}
	return -EAGAIN;
}

static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc,
				  struct arm_pmu *cpu_pmu)
{
	int idx;

	/*
	 * Chaining requires two consecutive event counters, where
	 * the lower idx must be even.
	 */
	for (idx = ARMV8_IDX_COUNTER0 + 1; idx < cpu_pmu->num_events; idx += 2) {
		if (!test_and_set_bit(idx, cpuc->used_mask)) {
			/* Check if the preceding even counter is available */
			if (!test_and_set_bit(idx - 1, cpuc->used_mask))
				return idx;
			/* Release the odd counter */
			clear_bit(idx, cpuc->used_mask);
		}
	}
	return -EAGAIN;
}

static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
				  struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;

	/* Always prefer to place a cycle event into the cycle counter. */
	if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
		if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
			return ARMV8_IDX_CYCLE_COUNTER;
	}

	/*
	 * Otherwise use the event counters
	 */
	if (armv8pmu_event_is_64bit(event) &&
	    !armv8pmu_has_long_event(cpu_pmu))
		return armv8pmu_get_chain_idx(cpuc, cpu_pmu);
	else
		return armv8pmu_get_single_idx(cpuc, cpu_pmu);
}

static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc,
				     struct perf_event *event)
{
	int idx = event->hw.idx;

	clear_bit(idx, cpuc->used_mask);
	if (armv8pmu_event_is_chained(event))
		clear_bit(idx - 1, cpuc->used_mask);
}

/*
 * Add an event filter to a given event.
 */
static int armv8pmu_set_event_filter(struct hw_perf_event *event,
				     struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (attr->exclude_idle)
		return -EPERM;

	/*
	 * If we're running in hyp mode, then we *are* the hypervisor.
	 * Therefore we ignore exclude_hv in this configuration, since
	 * there's no hypervisor to sample anyway. This is consistent
	 * with other architectures (x86 and Power).
	 */
	if (is_kernel_in_hyp_mode()) {
		if (!attr->exclude_kernel && !attr->exclude_host)
			config_base |= ARMV8_PMU_INCLUDE_EL2;
		if (attr->exclude_guest)
			config_base |= ARMV8_PMU_EXCLUDE_EL1;
		if (attr->exclude_host)
			config_base |= ARMV8_PMU_EXCLUDE_EL0;
	} else {
		if (!attr->exclude_hv && !attr->exclude_host)
			config_base |= ARMV8_PMU_INCLUDE_EL2;
	}

	/*
	 * Filter out !VHE kernels and guest kernels
	 */
	if (attr->exclude_kernel)
		config_base |= ARMV8_PMU_EXCLUDE_EL1;

	if (attr->exclude_user)
		config_base |= ARMV8_PMU_EXCLUDE_EL0;

	/*
	 * Install the filter into config_base as this is used to
	 * construct the event type.
	 */
	event->config_base = config_base;

	return 0;
}

static int armv8pmu_filter_match(struct perf_event *event)
{
	unsigned long evtype = event->hw.config_base & ARMV8_PMU_EVTYPE_EVENT;

	return evtype != ARMV8_PMUV3_PERFCTR_CHAIN;
}

static void armv8pmu_reset(void *info)
{
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
	u32 pmcr;

	/* The counter and interrupt enable registers are unknown at reset. */
	armv8pmu_disable_counter(U32_MAX);
	armv8pmu_disable_intens(U32_MAX);

	/* Clear the counters we flip at guest entry/exit */
	kvm_clr_pmu_events(U32_MAX);

	/*
	 * Initialize & Reset PMNC. Request overflow interrupt for
	 * 64-bit cycle counter but cheat in armv8pmu_write_counter().
	 */
	pmcr = ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_LC;

	/* Enable long event counter support where available */
	if (armv8pmu_has_long_event(cpu_pmu))
		pmcr |= ARMV8_PMU_PMCR_LP;

	armv8pmu_pmcr_write(pmcr);
}
| 907 | |
Will Deacon | 6c833bb | 2017-08-08 16:58:33 +0100 | [diff] [blame] | 908 | static int __armv8_pmuv3_map_event(struct perf_event *event, |
| 909 | const unsigned (*extra_event_map) |
| 910 | [PERF_COUNT_HW_MAX], |
| 911 | const unsigned (*extra_cache_map) |
| 912 | [PERF_COUNT_HW_CACHE_MAX] |
| 913 | [PERF_COUNT_HW_CACHE_OP_MAX] |
| 914 | [PERF_COUNT_HW_CACHE_RESULT_MAX]) |
Will Deacon | 0308968 | 2012-03-05 11:49:32 +0000 | [diff] [blame] | 915 | { |
Jeremy Linton | 236b9b91 | 2016-09-14 17:32:30 -0500 | [diff] [blame] | 916 | int hw_event_id; |
| 917 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); |
| 918 | |
| 919 | hw_event_id = armpmu_map_event(event, &armv8_pmuv3_perf_map, |
| 920 | &armv8_pmuv3_perf_cache_map, |
| 921 | ARMV8_PMU_EVTYPE_EVENT); |
Jeremy Linton | 236b9b91 | 2016-09-14 17:32:30 -0500 | [diff] [blame] | 922 | |
Suzuki K Poulose | c132079 | 2018-07-10 09:58:04 +0100 | [diff] [blame] | 923 | if (armv8pmu_event_is_64bit(event)) |
| 924 | event->hw.flags |= ARMPMU_EVT_64BIT; |
| 925 | |
Shaokun Zhang | e2b5c5c | 2018-10-06 15:57:38 +0800 | [diff] [blame] | 926 | /* Only expose (micro-)architectural events supported by this PMU */
Will Deacon | 6c833bb | 2017-08-08 16:58:33 +0100 | [diff] [blame] | 927 | if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS) |
| 928 | && test_bit(hw_event_id, armpmu->pmceid_bitmap)) { |
| 929 | return hw_event_id; |
Jeremy Linton | 236b9b91 | 2016-09-14 17:32:30 -0500 | [diff] [blame] | 930 | } |
| 931 | |
Will Deacon | 6c833bb | 2017-08-08 16:58:33 +0100 | [diff] [blame] | 932 | return armpmu_map_event(event, extra_event_map, extra_cache_map, |
| 933 | ARMV8_PMU_EVTYPE_EVENT); |
| 934 | } |
| 935 | |
| 936 | static int armv8_pmuv3_map_event(struct perf_event *event) |
| 937 | { |
| 938 | return __armv8_pmuv3_map_event(event, NULL, NULL); |
Will Deacon | 0308968 | 2012-03-05 11:49:32 +0000 | [diff] [blame] | 939 | } |
| 940 | |
Mark Rutland | ac82d12 | 2015-10-02 10:55:04 +0100 | [diff] [blame] | 941 | static int armv8_a53_map_event(struct perf_event *event) |
| 942 | { |
Will Deacon | d0d09d4 | 2017-08-08 17:11:27 +0100 | [diff] [blame] | 943 | return __armv8_pmuv3_map_event(event, NULL, &armv8_a53_perf_cache_map); |
Mark Rutland | ac82d12 | 2015-10-02 10:55:04 +0100 | [diff] [blame] | 944 | } |
| 945 | |
Mark Rutland | 62a4dda | 2015-10-02 10:55:05 +0100 | [diff] [blame] | 946 | static int armv8_a57_map_event(struct perf_event *event) |
| 947 | { |
Will Deacon | d0d09d4 | 2017-08-08 17:11:27 +0100 | [diff] [blame] | 948 | return __armv8_pmuv3_map_event(event, NULL, &armv8_a57_perf_cache_map); |
Mark Rutland | 62a4dda | 2015-10-02 10:55:05 +0100 | [diff] [blame] | 949 | } |
| 950 | |
Julien Thierry | 5561b6c | 2017-08-09 17:46:38 +0100 | [diff] [blame] | 951 | static int armv8_a73_map_event(struct perf_event *event) |
| 952 | { |
| 953 | return __armv8_pmuv3_map_event(event, NULL, &armv8_a73_perf_cache_map); |
| 954 | } |
| 955 | |
Jan Glauber | d0aa2bf | 2016-02-18 17:50:11 +0100 | [diff] [blame] | 956 | static int armv8_thunder_map_event(struct perf_event *event) |
| 957 | { |
Will Deacon | d0d09d4 | 2017-08-08 17:11:27 +0100 | [diff] [blame] | 958 | return __armv8_pmuv3_map_event(event, NULL, |
Will Deacon | 6c833bb | 2017-08-08 16:58:33 +0100 | [diff] [blame] | 959 | &armv8_thunder_perf_cache_map); |
Jan Glauber | d0aa2bf | 2016-02-18 17:50:11 +0100 | [diff] [blame] | 960 | } |
| 961 | |
Ashok Kumar | 201a72b | 2016-04-21 05:58:45 -0700 | [diff] [blame] | 962 | static int armv8_vulcan_map_event(struct perf_event *event) |
| 963 | { |
Will Deacon | d0d09d4 | 2017-08-08 17:11:27 +0100 | [diff] [blame] | 964 | return __armv8_pmuv3_map_event(event, NULL, |
Will Deacon | 6c833bb | 2017-08-08 16:58:33 +0100 | [diff] [blame] | 965 | &armv8_vulcan_perf_cache_map); |
Ashok Kumar | 201a72b | 2016-04-21 05:58:45 -0700 | [diff] [blame] | 966 | } |
| 967 | |
Mark Rutland | f1b36dc | 2017-04-11 09:39:56 +0100 | [diff] [blame] | 968 | struct armv8pmu_probe_info { |
| 969 | struct arm_pmu *pmu; |
| 970 | bool present; |
| 971 | }; |
| 972 | |
Ashok Kumar | 4b1a9e6 | 2016-04-21 05:58:44 -0700 | [diff] [blame] | 973 | static void __armv8pmu_probe_pmu(void *info) |
Will Deacon | 0308968 | 2012-03-05 11:49:32 +0000 | [diff] [blame] | 974 | { |
Mark Rutland | f1b36dc | 2017-04-11 09:39:56 +0100 | [diff] [blame] | 975 | struct armv8pmu_probe_info *probe = info; |
| 976 | struct arm_pmu *cpu_pmu = probe->pmu; |
Mark Rutland | faa9a08 | 2017-04-25 12:08:50 +0100 | [diff] [blame] | 977 | u64 dfr0; |
Will Deacon | 342e53b | 2018-10-05 13:28:07 +0100 | [diff] [blame] | 978 | u64 pmceid_raw[2]; |
Ashok Kumar | 4b1a9e6 | 2016-04-21 05:58:44 -0700 | [diff] [blame] | 979 | u32 pmceid[2]; |
Mark Rutland | faa9a08 | 2017-04-25 12:08:50 +0100 | [diff] [blame] | 980 | int pmuver; |
Will Deacon | 0308968 | 2012-03-05 11:49:32 +0000 | [diff] [blame] | 981 | |
Mark Rutland | f1b36dc | 2017-04-11 09:39:56 +0100 | [diff] [blame] | 982 | dfr0 = read_sysreg(id_aa64dfr0_el1); |
Mark Rutland | 0331365 | 2018-02-14 17:21:57 +0000 | [diff] [blame] | 983 | pmuver = cpuid_feature_extract_unsigned_field(dfr0, |
Mark Rutland | f1b36dc | 2017-04-11 09:39:56 +0100 | [diff] [blame] | 984 | ID_AA64DFR0_PMUVER_SHIFT); |
Mark Rutland | 0331365 | 2018-02-14 17:21:57 +0000 | [diff] [blame] | 985 | if (pmuver == 0xf || pmuver == 0) |
Mark Rutland | f1b36dc | 2017-04-11 09:39:56 +0100 | [diff] [blame] | 986 | return; |
| 987 | |
Andrew Murray | 8673e02 | 2020-03-02 18:17:52 +0000 | [diff] [blame] | 988 | cpu_pmu->pmuver = pmuver; |
Mark Rutland | f1b36dc | 2017-04-11 09:39:56 +0100 | [diff] [blame] | 989 | probe->present = true; |
| 990 | |
Will Deacon | 0308968 | 2012-03-05 11:49:32 +0000 | [diff] [blame] | 991 | /* Read the number of CNTx counters supported from PMCR_EL0.N */
Ashok Kumar | 4b1a9e6 | 2016-04-21 05:58:44 -0700 | [diff] [blame] | 992 | cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT) |
| 993 | & ARMV8_PMU_PMCR_N_MASK; |
Will Deacon | 0308968 | 2012-03-05 11:49:32 +0000 | [diff] [blame] | 994 | |
Mark Rutland | 6475b2d | 2015-10-02 10:55:03 +0100 | [diff] [blame] | 995 | /* Add the CPU cycles counter */ |
Ashok Kumar | 4b1a9e6 | 2016-04-21 05:58:44 -0700 | [diff] [blame] | 996 | cpu_pmu->num_events += 1; |
| 997 | |
Will Deacon | 342e53b | 2018-10-05 13:28:07 +0100 | [diff] [blame] | 998 | pmceid[0] = pmceid_raw[0] = read_sysreg(pmceid0_el0); |
| 999 | pmceid[1] = pmceid_raw[1] = read_sysreg(pmceid1_el0); |
Ashok Kumar | 4b1a9e6 | 2016-04-21 05:58:44 -0700 | [diff] [blame] | 1000 | |
Yury Norov | 3aa5688 | 2018-02-06 15:38:06 -0800 | [diff] [blame] | 1001 | bitmap_from_arr32(cpu_pmu->pmceid_bitmap, |
| 1002 | pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS); |
Will Deacon | 342e53b | 2018-10-05 13:28:07 +0100 | [diff] [blame] | 1003 | |
| 1004 | pmceid[0] = pmceid_raw[0] >> 32; |
| 1005 | pmceid[1] = pmceid_raw[1] >> 32; |
| 1006 | |
| 1007 | bitmap_from_arr32(cpu_pmu->pmceid_ext_bitmap, |
| 1008 | pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS); |
Shaokun Zhang | f5be3a6 | 2020-09-22 13:53:45 +0800 | [diff] [blame^] | 1009 | |
 | 1010 | /* Store a copy of PMMIR_EL1 for exposing via sysfs */
| 1011 | if (pmuver >= ID_AA64DFR0_PMUVER_8_4 && (pmceid_raw[1] & BIT(31))) |
| 1012 | cpu_pmu->reg_pmmir = read_cpuid(PMMIR_EL1); |
| 1013 | else |
| 1014 | cpu_pmu->reg_pmmir = 0; |
Will Deacon | 0308968 | 2012-03-05 11:49:32 +0000 | [diff] [blame] | 1015 | } |
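/*
 * Illustrative sketch (an assumed helper, not part of this file): once the
 * bitmaps above are populated, checking whether a common event is
 * advertised by PMCEID{0,1}_EL0 reduces to a test_bit() lookup. Extended
 * common events (ARMV8_PMUV3_EXT_COMMON_EVENT_BASE and up) live in the
 * upper PMCEID halves, mirrored into pmceid_ext_bitmap.
 */
static bool armv8pmu_pmceid_event_supported(struct arm_pmu *cpu_pmu, u32 id)
{
	if (id < ARMV8_PMUV3_MAX_COMMON_EVENTS)
		return test_bit(id, cpu_pmu->pmceid_bitmap);

	if (id >= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE &&
	    id - ARMV8_PMUV3_EXT_COMMON_EVENT_BASE <
	    ARMV8_PMUV3_MAX_COMMON_EVENTS)
		return test_bit(id - ARMV8_PMUV3_EXT_COMMON_EVENT_BASE,
				cpu_pmu->pmceid_ext_bitmap);

	return false;
}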
| 1016 | |
Ashok Kumar | 4b1a9e6 | 2016-04-21 05:58:44 -0700 | [diff] [blame] | 1017 | static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu) |
Will Deacon | 0308968 | 2012-03-05 11:49:32 +0000 | [diff] [blame] | 1018 | { |
Mark Rutland | f1b36dc | 2017-04-11 09:39:56 +0100 | [diff] [blame] | 1019 | struct armv8pmu_probe_info probe = { |
| 1020 | .pmu = cpu_pmu, |
| 1021 | .present = false, |
| 1022 | }; |
| 1023 | int ret; |
| 1024 | |
| 1025 | ret = smp_call_function_any(&cpu_pmu->supported_cpus, |
Ashok Kumar | 4b1a9e6 | 2016-04-21 05:58:44 -0700 | [diff] [blame] | 1026 | __armv8pmu_probe_pmu, |
Mark Rutland | f1b36dc | 2017-04-11 09:39:56 +0100 | [diff] [blame] | 1027 | &probe, 1); |
| 1028 | if (ret) |
| 1029 | return ret; |
| 1030 | |
| 1031 | return probe.present ? 0 : -ENODEV; |
Will Deacon | 0308968 | 2012-03-05 11:49:32 +0000 | [diff] [blame] | 1032 | } |
| 1033 | |
Robin Murphy | e424b17 | 2020-02-21 19:35:31 +0000 | [diff] [blame] | 1034 | static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name, |
| 1035 | int (*map_event)(struct perf_event *event), |
| 1036 | const struct attribute_group *events, |
Shaokun Zhang | f5be3a6 | 2020-09-22 13:53:45 +0800 | [diff] [blame^] | 1037 | const struct attribute_group *format, |
| 1038 | const struct attribute_group *caps) |
Will Deacon | 0308968 | 2012-03-05 11:49:32 +0000 | [diff] [blame] | 1039 | { |
Mark Rutland | f1b36dc | 2017-04-11 09:39:56 +0100 | [diff] [blame] | 1040 | int ret = armv8pmu_probe_pmu(cpu_pmu); |
| 1041 | if (ret) |
| 1042 | return ret; |
| 1043 | |
Will Deacon | d3adeed | 2018-10-05 13:26:21 +0100 | [diff] [blame] | 1044 | cpu_pmu->handle_irq = armv8pmu_handle_irq; |
| 1045 | cpu_pmu->enable = armv8pmu_enable_event; |
| 1046 | cpu_pmu->disable = armv8pmu_disable_event; |
| 1047 | cpu_pmu->read_counter = armv8pmu_read_counter; |
| 1048 | cpu_pmu->write_counter = armv8pmu_write_counter; |
| 1049 | cpu_pmu->get_event_idx = armv8pmu_get_event_idx; |
| 1050 | cpu_pmu->clear_event_idx = armv8pmu_clear_event_idx; |
| 1051 | cpu_pmu->start = armv8pmu_start; |
| 1052 | cpu_pmu->stop = armv8pmu_stop; |
| 1053 | cpu_pmu->reset = armv8pmu_reset; |
Mark Rutland | ac82d12 | 2015-10-02 10:55:04 +0100 | [diff] [blame] | 1054 | cpu_pmu->set_event_filter = armv8pmu_set_event_filter; |
Will Deacon | ca2b497 | 2018-10-05 13:24:36 +0100 | [diff] [blame] | 1055 | cpu_pmu->filter_match = armv8pmu_filter_match; |
Mark Rutland | f1b36dc | 2017-04-11 09:39:56 +0100 | [diff] [blame] | 1056 | |
Robin Murphy | e424b17 | 2020-02-21 19:35:31 +0000 | [diff] [blame] | 1057 | cpu_pmu->name = name; |
| 1058 | cpu_pmu->map_event = map_event; |
| 1059 | cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = events ? |
| 1060 | events : &armv8_pmuv3_events_attr_group; |
| 1061 | cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = format ? |
| 1062 | format : &armv8_pmuv3_format_attr_group; |
Shaokun Zhang | f5be3a6 | 2020-09-22 13:53:45 +0800 | [diff] [blame^] | 1063 | cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_CAPS] = caps ? |
| 1064 | caps : &armv8_pmuv3_caps_attr_group; |
Robin Murphy | e424b17 | 2020-02-21 19:35:31 +0000 | [diff] [blame] | 1065 | |
Mark Rutland | f1b36dc | 2017-04-11 09:39:56 +0100 | [diff] [blame] | 1066 | return 0; |
Mark Rutland | ac82d12 | 2015-10-02 10:55:04 +0100 | [diff] [blame] | 1067 | } |
| 1068 | |
Shaokun Zhang | f5be3a6 | 2020-09-22 13:53:45 +0800 | [diff] [blame^] | 1069 | static int armv8_pmu_init_nogroups(struct arm_pmu *cpu_pmu, char *name, |
| 1070 | int (*map_event)(struct perf_event *event)) |
| 1071 | { |
| 1072 | return armv8_pmu_init(cpu_pmu, name, map_event, NULL, NULL, NULL); |
| 1073 | } |
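/*
 * Hypothetical example (the armv8_fooxx_* names are illustrative only): an
 * implementation exposing extra IMPLEMENTATION DEFINED events would supply
 * its own events attribute group here instead of going through the
 * _nogroups wrapper; passing NULL falls back to the generic groups.
 */
static int armv8_fooxx_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init(cpu_pmu, "armv8_fooxx",
			      armv8_pmuv3_map_event,
			      &armv8_fooxx_events_attr_group,
			      NULL, NULL);	/* default format/caps groups */
}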
| 1074 | |
Mark Rutland | ac82d12 | 2015-10-02 10:55:04 +0100 | [diff] [blame] | 1075 | static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu) |
| 1076 | { |
Shaokun Zhang | f5be3a6 | 2020-09-22 13:53:45 +0800 | [diff] [blame^] | 1077 | return armv8_pmu_init_nogroups(cpu_pmu, "armv8_pmuv3", |
| 1078 | armv8_pmuv3_map_event); |
Mark Rutland | ac82d12 | 2015-10-02 10:55:04 +0100 | [diff] [blame] | 1079 | } |
| 1080 | |
Robin Murphy | 29cc4ce | 2020-02-21 19:35:32 +0000 | [diff] [blame] | 1081 | static int armv8_a34_pmu_init(struct arm_pmu *cpu_pmu) |
| 1082 | { |
Shaokun Zhang | f5be3a6 | 2020-09-22 13:53:45 +0800 | [diff] [blame^] | 1083 | return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a34", |
| 1084 | armv8_pmuv3_map_event); |
Robin Murphy | 29cc4ce | 2020-02-21 19:35:32 +0000 | [diff] [blame] | 1085 | } |
| 1086 | |
Julien Thierry | e884f80 | 2017-08-09 17:46:39 +0100 | [diff] [blame] | 1087 | static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu) |
| 1088 | { |
Shaokun Zhang | f5be3a6 | 2020-09-22 13:53:45 +0800 | [diff] [blame^] | 1089 | return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a35", |
| 1090 | armv8_a53_map_event); |
Julien Thierry | e884f80 | 2017-08-09 17:46:39 +0100 | [diff] [blame] | 1091 | } |
| 1092 | |
Mark Rutland | ac82d12 | 2015-10-02 10:55:04 +0100 | [diff] [blame] | 1093 | static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu) |
| 1094 | { |
Shaokun Zhang | f5be3a6 | 2020-09-22 13:53:45 +0800 | [diff] [blame^] | 1095 | return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a53", |
| 1096 | armv8_a53_map_event); |
Will Deacon | 0308968 | 2012-03-05 11:49:32 +0000 | [diff] [blame] | 1097 | } |
Will Deacon | 0308968 | 2012-03-05 11:49:32 +0000 | [diff] [blame] | 1098 | |
Robin Murphy | 29cc4ce | 2020-02-21 19:35:32 +0000 | [diff] [blame] | 1099 | static int armv8_a55_pmu_init(struct arm_pmu *cpu_pmu) |
| 1100 | { |
Shaokun Zhang | f5be3a6 | 2020-09-22 13:53:45 +0800 | [diff] [blame^] | 1101 | return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a55", |
| 1102 | armv8_pmuv3_map_event); |
Robin Murphy | 29cc4ce | 2020-02-21 19:35:32 +0000 | [diff] [blame] | 1103 | } |
| 1104 | |
Mark Rutland | 62a4dda | 2015-10-02 10:55:05 +0100 | [diff] [blame] | 1105 | static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu) |
| 1106 | { |
Shaokun Zhang | f5be3a6 | 2020-09-22 13:53:45 +0800 | [diff] [blame^] | 1107 | return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a57", |
| 1108 | armv8_a57_map_event); |
Mark Rutland | 62a4dda | 2015-10-02 10:55:05 +0100 | [diff] [blame] | 1109 | } |
| 1110 | |
Robin Murphy | 29cc4ce | 2020-02-21 19:35:32 +0000 | [diff] [blame] | 1111 | static int armv8_a65_pmu_init(struct arm_pmu *cpu_pmu) |
| 1112 | { |
Shaokun Zhang | f5be3a6 | 2020-09-22 13:53:45 +0800 | [diff] [blame^] | 1113 | return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a65", |
| 1114 | armv8_pmuv3_map_event); |
Robin Murphy | 29cc4ce | 2020-02-21 19:35:32 +0000 | [diff] [blame] | 1115 | } |
| 1116 | |
Will Deacon | 5d7ee87 | 2015-12-22 14:45:35 +0000 | [diff] [blame] | 1117 | static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu) |
| 1118 | { |
Shaokun Zhang | f5be3a6 | 2020-09-22 13:53:45 +0800 | [diff] [blame^] | 1119 | return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a72", |
| 1120 | armv8_a57_map_event); |
Will Deacon | 5d7ee87 | 2015-12-22 14:45:35 +0000 | [diff] [blame] | 1121 | } |
| 1122 | |
Julien Thierry | 5561b6c | 2017-08-09 17:46:38 +0100 | [diff] [blame] | 1123 | static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu) |
| 1124 | { |
Shaokun Zhang | f5be3a6 | 2020-09-22 13:53:45 +0800 | [diff] [blame^] | 1125 | return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a73", |
| 1126 | armv8_a73_map_event); |
Julien Thierry | 5561b6c | 2017-08-09 17:46:38 +0100 | [diff] [blame] | 1127 | } |
| 1128 | |
Robin Murphy | 29cc4ce | 2020-02-21 19:35:32 +0000 | [diff] [blame] | 1129 | static int armv8_a75_pmu_init(struct arm_pmu *cpu_pmu) |
| 1130 | { |
Shaokun Zhang | f5be3a6 | 2020-09-22 13:53:45 +0800 | [diff] [blame^] | 1131 | return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a75", |
| 1132 | armv8_pmuv3_map_event); |
Robin Murphy | 29cc4ce | 2020-02-21 19:35:32 +0000 | [diff] [blame] | 1133 | } |
| 1134 | |
| 1135 | static int armv8_a76_pmu_init(struct arm_pmu *cpu_pmu) |
| 1136 | { |
Shaokun Zhang | f5be3a6 | 2020-09-22 13:53:45 +0800 | [diff] [blame^] | 1137 | return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a76", |
| 1138 | armv8_pmuv3_map_event); |
Robin Murphy | 29cc4ce | 2020-02-21 19:35:32 +0000 | [diff] [blame] | 1139 | } |
| 1140 | |
| 1141 | static int armv8_a77_pmu_init(struct arm_pmu *cpu_pmu) |
| 1142 | { |
Shaokun Zhang | f5be3a6 | 2020-09-22 13:53:45 +0800 | [diff] [blame^] | 1143 | return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a77", |
| 1144 | armv8_pmuv3_map_event); |
Robin Murphy | 29cc4ce | 2020-02-21 19:35:32 +0000 | [diff] [blame] | 1145 | } |
| 1146 | |
| 1147 | static int armv8_e1_pmu_init(struct arm_pmu *cpu_pmu) |
| 1148 | { |
Shaokun Zhang | f5be3a6 | 2020-09-22 13:53:45 +0800 | [diff] [blame^] | 1149 | return armv8_pmu_init_nogroups(cpu_pmu, "armv8_neoverse_e1", |
| 1150 | armv8_pmuv3_map_event); |
Robin Murphy | 29cc4ce | 2020-02-21 19:35:32 +0000 | [diff] [blame] | 1151 | } |
| 1152 | |
| 1153 | static int armv8_n1_pmu_init(struct arm_pmu *cpu_pmu) |
| 1154 | { |
Shaokun Zhang | f5be3a6 | 2020-09-22 13:53:45 +0800 | [diff] [blame^] | 1155 | return armv8_pmu_init_nogroups(cpu_pmu, "armv8_neoverse_n1", |
| 1156 | armv8_pmuv3_map_event); |
Robin Murphy | 29cc4ce | 2020-02-21 19:35:32 +0000 | [diff] [blame] | 1157 | } |
| 1158 | |
Jan Glauber | d0aa2bf | 2016-02-18 17:50:11 +0100 | [diff] [blame] | 1159 | static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu) |
| 1160 | { |
Shaokun Zhang | f5be3a6 | 2020-09-22 13:53:45 +0800 | [diff] [blame^] | 1161 | return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cavium_thunder", |
| 1162 | armv8_thunder_map_event); |
Jan Glauber | d0aa2bf | 2016-02-18 17:50:11 +0100 | [diff] [blame] | 1163 | } |
| 1164 | |
Ashok Kumar | 201a72b | 2016-04-21 05:58:45 -0700 | [diff] [blame] | 1165 | static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu) |
| 1166 | { |
Shaokun Zhang | f5be3a6 | 2020-09-22 13:53:45 +0800 | [diff] [blame^] | 1167 | return armv8_pmu_init_nogroups(cpu_pmu, "armv8_brcm_vulcan", |
| 1168 | armv8_vulcan_map_event); |
Ashok Kumar | 201a72b | 2016-04-21 05:58:45 -0700 | [diff] [blame] | 1169 | } |
| 1170 | |
Mark Rutland | 6475b2d | 2015-10-02 10:55:03 +0100 | [diff] [blame] | 1171 | static const struct of_device_id armv8_pmu_of_device_ids[] = { |
| 1172 | {.compatible = "arm,armv8-pmuv3", .data = armv8_pmuv3_init}, |
Robin Murphy | 29cc4ce | 2020-02-21 19:35:32 +0000 | [diff] [blame] | 1173 | {.compatible = "arm,cortex-a34-pmu", .data = armv8_a34_pmu_init}, |
Julien Thierry | e884f80 | 2017-08-09 17:46:39 +0100 | [diff] [blame] | 1174 | {.compatible = "arm,cortex-a35-pmu", .data = armv8_a35_pmu_init}, |
Mark Rutland | ac82d12 | 2015-10-02 10:55:04 +0100 | [diff] [blame] | 1175 | {.compatible = "arm,cortex-a53-pmu", .data = armv8_a53_pmu_init}, |
Robin Murphy | 29cc4ce | 2020-02-21 19:35:32 +0000 | [diff] [blame] | 1176 | {.compatible = "arm,cortex-a55-pmu", .data = armv8_a55_pmu_init}, |
Mark Rutland | 62a4dda | 2015-10-02 10:55:05 +0100 | [diff] [blame] | 1177 | {.compatible = "arm,cortex-a57-pmu", .data = armv8_a57_pmu_init}, |
Robin Murphy | 29cc4ce | 2020-02-21 19:35:32 +0000 | [diff] [blame] | 1178 | {.compatible = "arm,cortex-a65-pmu", .data = armv8_a65_pmu_init}, |
Will Deacon | 5d7ee87 | 2015-12-22 14:45:35 +0000 | [diff] [blame] | 1179 | {.compatible = "arm,cortex-a72-pmu", .data = armv8_a72_pmu_init}, |
Julien Thierry | 5561b6c | 2017-08-09 17:46:38 +0100 | [diff] [blame] | 1180 | {.compatible = "arm,cortex-a73-pmu", .data = armv8_a73_pmu_init}, |
Robin Murphy | 29cc4ce | 2020-02-21 19:35:32 +0000 | [diff] [blame] | 1181 | {.compatible = "arm,cortex-a75-pmu", .data = armv8_a75_pmu_init}, |
| 1182 | {.compatible = "arm,cortex-a76-pmu", .data = armv8_a76_pmu_init}, |
| 1183 | {.compatible = "arm,cortex-a77-pmu", .data = armv8_a77_pmu_init}, |
| 1184 | {.compatible = "arm,neoverse-e1-pmu", .data = armv8_e1_pmu_init}, |
| 1185 | {.compatible = "arm,neoverse-n1-pmu", .data = armv8_n1_pmu_init}, |
Jan Glauber | d0aa2bf | 2016-02-18 17:50:11 +0100 | [diff] [blame] | 1186 | {.compatible = "cavium,thunder-pmu", .data = armv8_thunder_pmu_init}, |
Ashok Kumar | 201a72b | 2016-04-21 05:58:45 -0700 | [diff] [blame] | 1187 | {.compatible = "brcm,vulcan-pmu", .data = armv8_vulcan_pmu_init}, |
Will Deacon | 0308968 | 2012-03-05 11:49:32 +0000 | [diff] [blame] | 1188 | {}, |
| 1189 | }; |
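/*
 * For reference, a device tree node matched by the table above looks
 * roughly like this (the interrupt specifier is board-specific and shown
 * here only as an example):
 *
 *	pmu {
 *		compatible = "arm,cortex-a53-pmu";
 *		interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
 *	};
 */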
| 1190 | |
Mark Rutland | 6475b2d | 2015-10-02 10:55:03 +0100 | [diff] [blame] | 1191 | static int armv8_pmu_device_probe(struct platform_device *pdev) |
Will Deacon | 0308968 | 2012-03-05 11:49:32 +0000 | [diff] [blame] | 1192 | { |
Mark Rutland | f00fa5f | 2017-04-11 09:39:57 +0100 | [diff] [blame] | 1193 | return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL); |
Will Deacon | 0308968 | 2012-03-05 11:49:32 +0000 | [diff] [blame] | 1194 | } |
| 1195 | |
Mark Rutland | 6475b2d | 2015-10-02 10:55:03 +0100 | [diff] [blame] | 1196 | static struct platform_driver armv8_pmu_driver = { |
Will Deacon | 0308968 | 2012-03-05 11:49:32 +0000 | [diff] [blame] | 1197 | .driver = { |
Jeremy Linton | 85023b2 | 2016-09-14 17:32:31 -0500 | [diff] [blame] | 1198 | .name = ARMV8_PMU_PDEV_NAME, |
Mark Rutland | 6475b2d | 2015-10-02 10:55:03 +0100 | [diff] [blame] | 1199 | .of_match_table = armv8_pmu_of_device_ids, |
Anders Roxell | 81e9fa8 | 2018-10-17 17:26:22 +0200 | [diff] [blame] | 1200 | .suppress_bind_attrs = true, |
Will Deacon | 0308968 | 2012-03-05 11:49:32 +0000 | [diff] [blame] | 1201 | }, |
Mark Rutland | 6475b2d | 2015-10-02 10:55:03 +0100 | [diff] [blame] | 1202 | .probe = armv8_pmu_device_probe, |
Will Deacon | 0308968 | 2012-03-05 11:49:32 +0000 | [diff] [blame] | 1203 | }; |
| 1204 | |
Mark Rutland | f00fa5f | 2017-04-11 09:39:57 +0100 | [diff] [blame] | 1205 | static int __init armv8_pmu_driver_init(void) |
| 1206 | { |
| 1207 | if (acpi_disabled) |
| 1208 | return platform_driver_register(&armv8_pmu_driver); |
| 1209 | else |
| 1210 | return arm_pmu_acpi_probe(armv8_pmuv3_init); |
| 1211 | } |
| 1212 | device_initcall(armv8_pmu_driver_init) |
Michael O'Farrell | 9d2dcc8f | 2018-07-30 13:14:34 -0700 | [diff] [blame] | 1213 | |
| 1214 | void arch_perf_update_userpage(struct perf_event *event, |
| 1215 | struct perf_event_mmap_page *userpg, u64 now) |
| 1216 | { |
Peter Zijlstra | 950b74dd | 2020-07-16 13:11:26 +0800 | [diff] [blame] | 1217 | struct clock_read_data *rd; |
| 1218 | unsigned int seq; |
| 1219 | u64 ns; |
Michael O'Farrell | 9d2dcc8f | 2018-07-30 13:14:34 -0700 | [diff] [blame] | 1220 | |
Peter Zijlstra | 279a811 | 2020-07-16 13:11:27 +0800 | [diff] [blame] | 1221 | userpg->cap_user_time = 0; |
| 1222 | userpg->cap_user_time_zero = 0; |
Peter Zijlstra | c8f9eb0 | 2020-07-16 13:11:29 +0800 | [diff] [blame] | 1223 | userpg->cap_user_time_short = 0; |
Michael O'Farrell | 9d2dcc8f | 2018-07-30 13:14:34 -0700 | [diff] [blame] | 1224 | |
Peter Zijlstra | 950b74dd | 2020-07-16 13:11:26 +0800 | [diff] [blame] | 1225 | do { |
| 1226 | rd = sched_clock_read_begin(&seq); |
| 1227 | |
Peter Zijlstra | 279a811 | 2020-07-16 13:11:27 +0800 | [diff] [blame] | 1228 | if (rd->read_sched_clock != arch_timer_read_counter) |
| 1229 | return; |
| 1230 | |
Peter Zijlstra | 950b74dd | 2020-07-16 13:11:26 +0800 | [diff] [blame] | 1231 | userpg->time_mult = rd->mult; |
| 1232 | userpg->time_shift = rd->shift; |
| 1233 | userpg->time_zero = rd->epoch_ns; |
Peter Zijlstra | c8f9eb0 | 2020-07-16 13:11:29 +0800 | [diff] [blame] | 1234 | userpg->time_cycles = rd->epoch_cyc; |
| 1235 | userpg->time_mask = rd->sched_clock_mask; |
Peter Zijlstra | 950b74dd | 2020-07-16 13:11:26 +0800 | [diff] [blame] | 1236 | |
| 1237 | /* |
Peter Zijlstra | c8f9eb0 | 2020-07-16 13:11:29 +0800 | [diff] [blame] | 1238 | * Subtract the cycle base so that software which doesn't
 | 1239 | * know about cap_user_time_short still 'works', assuming
 | 1240 | * no counter wraps.
Peter Zijlstra | 950b74dd | 2020-07-16 13:11:26 +0800 | [diff] [blame] | 1241 | */ |
| 1242 | ns = mul_u64_u32_shr(rd->epoch_cyc, rd->mult, rd->shift); |
| 1243 | userpg->time_zero -= ns; |
| 1244 | |
| 1245 | } while (sched_clock_read_retry(seq)); |
| 1246 | |
| 1247 | userpg->time_offset = userpg->time_zero - now; |
| 1248 | |
Michael O'Farrell | 9d2dcc8f | 2018-07-30 13:14:34 -0700 | [diff] [blame] | 1249 | /*
 | 1250 | * time_shift is not expected to be greater than 31: the
 | 1251 | * originally published conversion algorithm shifted a
 | 1252 | * 32-bit value (it now specifies a 64-bit value) - see the
 | 1253 | * perf_event_mmap_page documentation in perf_event.h.
 | 1254 | */
Peter Zijlstra | 950b74dd | 2020-07-16 13:11:26 +0800 | [diff] [blame] | 1255 | if (userpg->time_shift == 32) { |
| 1256 | userpg->time_shift = 31; |
Michael O'Farrell | 9d2dcc8f | 2018-07-30 13:14:34 -0700 | [diff] [blame] | 1257 | userpg->time_mult >>= 1; |
| 1258 | } |
Peter Zijlstra | 950b74dd | 2020-07-16 13:11:26 +0800 | [diff] [blame] | 1259 | |
Peter Zijlstra | 279a811 | 2020-07-16 13:11:27 +0800 | [diff] [blame] | 1260 | /* |
| 1261 | * Internal timekeeping for enabled/running/stopped times |
| 1262 | * is always computed with the sched_clock. |
| 1263 | */ |
| 1264 | userpg->cap_user_time = 1; |
| 1265 | userpg->cap_user_time_zero = 1; |
Peter Zijlstra | c8f9eb0 | 2020-07-16 13:11:29 +0800 | [diff] [blame] | 1266 | userpg->cap_user_time_short = 1; |
Michael O'Farrell | 9d2dcc8f | 2018-07-30 13:14:34 -0700 | [diff] [blame] | 1267 | } |
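/*
 * Illustrative userspace sketch (not kernel code): given the fields
 * published above, the recipe documented for perf_event_mmap_page in
 * perf_event.h converts a raw counter value into nanoseconds. The
 * cap_user_time_short step undoes the cycle-base subtraction performed
 * in arch_perf_update_userpage().
 */
#include <linux/perf_event.h>

static inline __u64 perf_cyc_to_ns(const struct perf_event_mmap_page *pc,
				   __u64 cyc)
{
	__u64 quot, rem;

	if (pc->cap_user_time_short)
		cyc = pc->time_cycles +
		      ((cyc - pc->time_cycles) & pc->time_mask);

	quot = cyc >> pc->time_shift;
	rem  = cyc & (((__u64)1 << pc->time_shift) - 1);
	return pc->time_zero + quot * pc->time_mult +
	       ((rem * pc->time_mult) >> pc->time_shift);
}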