// SPDX-License-Identifier: GPL-2.0-only
/* pcr.c: Generic sparc64 performance counter infrastructure.
 *
 * Copyright (C) 2009 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/irq.h>

#include <linux/irq_work.h>
#include <linux/ftrace.h>

#include <asm/pil.h>
#include <asm/pcr.h>
#include <asm/nmi.h>
#include <asm/asi.h>
#include <asm/spitfire.h>

/* This code is shared between various users of the performance
 * counters.  Users will be oprofile, pseudo-NMI watchdog, and the
 * perf_event support layer.
 */

/* Performance counter interrupts run unmasked at PIL level 15.
 * Therefore we can't do things like wakeups and other work that
 * relies on IRQ disabling being honored by the locking code.
 *
 * In such situations we instead defer the work by signalling a
 * lower priority cpu IRQ.
 */
void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	clear_softint(1 << PIL_DEFERRED_PCR_WORK);

	old_regs = set_irq_regs(regs);
	irq_enter();
#ifdef CONFIG_IRQ_WORK
	irq_work_run();
#endif
	irq_exit();
	set_irq_regs(old_regs);
}

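/* Hook used by the generic irq_work code to request that pending
 * work be run; raising this softint gets irq_work_run() called from
 * the lower priority handler above.
 */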
void arch_irq_work_raise(void)
{
	set_softint(1 << PIL_DEFERRED_PCR_WORK);
}

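/* PCR/PIC access methods for the cpu we are running on, selected
 * once at boot by pcr_arch_init() below.
 */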
const struct pcr_ops *pcr_ops;
EXPORT_SYMBOL_GPL(pcr_ops);

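/* On cpus where the counters are directly accessible (cheetah and
 * cheetah_plus), %pcr and %pic are simply read and written with
 * rd/wr instructions.
 */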
static u64 direct_pcr_read(unsigned long reg_num)
{
	u64 val;

	WARN_ON_ONCE(reg_num != 0);
	__asm__ __volatile__("rd %%pcr, %0" : "=r" (val));
	return val;
}

static void direct_pcr_write(unsigned long reg_num, u64 val)
{
	WARN_ON_ONCE(reg_num != 0);
	__asm__ __volatile__("wr %0, 0x0, %%pcr" : : "r" (val));
}

static u64 direct_pic_read(unsigned long reg_num)
{
	u64 val;

	WARN_ON_ONCE(reg_num != 0);
	__asm__ __volatile__("rd %%pic, %0" : "=r" (val));
	return val;
}

static void direct_pic_write(unsigned long reg_num, u64 val)
{
	WARN_ON_ONCE(reg_num != 0);

	/* Blackbird errata workaround.  See commentary in
	 * arch/sparc64/kernel/smp.c:smp_percpu_timer_interrupt()
	 * for more information.
	 */
	__asm__ __volatile__("ba,pt %%xcc, 99f\n\t"
			     " nop\n\t"
			     ".align 64\n"
			     "99:wr %0, 0x0, %%pic\n\t"
			     "rd %%pic, %%g0" : : "r" (val));
}

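/* Pre-load the counter with the negated delta (cycles between
 * watchdog ticks) so that it wraps and raises the overflow interrupt
 * roughly nmi_hz times per second; the value lives in the upper
 * 32 bits of %pic.
 */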
static u64 direct_picl_value(unsigned int nmi_hz)
{
	u32 delta = local_cpu_data().clock_tick / nmi_hz;

	return ((u64)((0 - delta) & 0xffffffff)) << 32;
}

static const struct pcr_ops direct_pcr_ops = {
	.read_pcr		= direct_pcr_read,
	.write_pcr		= direct_pcr_write,
	.read_pic		= direct_pic_read,
	.write_pic		= direct_pic_write,
	.nmi_picl_value		= direct_picl_value,
	.pcr_nmi_enable		= (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE),
	.pcr_nmi_disable	= PCR_PIC_PRIV,
};

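/* Niagara-2 needs the hypervisor to update the PCR when hypervisor
 * tracing is requested; otherwise, or if that call fails, the
 * register is written directly.
 */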
static void n2_pcr_write(unsigned long reg_num, u64 val)
{
	unsigned long ret;

	WARN_ON_ONCE(reg_num != 0);
	if (val & PCR_N2_HTRACE) {
		ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val);
		if (ret != HV_EOK)
			direct_pcr_write(reg_num, val);
	} else
		direct_pcr_write(reg_num, val);
}

static u64 n2_picl_value(unsigned int nmi_hz)
{
	u32 delta = local_cpu_data().clock_tick / (nmi_hz << 2);

	return ((u64)((0 - delta) & 0xffffffff)) << 32;
}

static const struct pcr_ops n2_pcr_ops = {
	.read_pcr		= direct_pcr_read,
	.write_pcr		= n2_pcr_write,
	.read_pic		= direct_pic_read,
	.write_pic		= direct_pic_write,
	.nmi_picl_value		= n2_picl_value,
	.pcr_nmi_enable		= (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE |
				   PCR_N2_TOE_OV1 |
				   (2 << PCR_N2_SL1_SHIFT) |
				   (0xff << PCR_N2_MASK1_SHIFT)),
	.pcr_nmi_disable	= PCR_PIC_PRIV,
};

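/* SPARC-T4: the PCRs are accessed through hypervisor calls, while
 * the PICs are read and written through ASI_PIC at 8-byte offsets
 * instead of via the %pic register.
 */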
static u64 n4_pcr_read(unsigned long reg_num)
{
	unsigned long val;

	(void) sun4v_vt_get_perfreg(reg_num, &val);

	return val;
}

static void n4_pcr_write(unsigned long reg_num, u64 val)
{
	(void) sun4v_vt_set_perfreg(reg_num, val);
}

static u64 n4_pic_read(unsigned long reg_num)
{
	unsigned long val;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (val)
			     : "r" (reg_num * 0x8UL), "i" (ASI_PIC));

	return val;
}

static void n4_pic_write(unsigned long reg_num, u64 val)
{
	__asm__ __volatile__("stxa %0, [%1] %2"
			     : /* no outputs */
			     : "r" (val), "r" (reg_num * 0x8UL), "i" (ASI_PIC));
}

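/* No shift into an upper half here: each T4 PIC is programmed
 * individually through ASI_PIC, so the negated delta is used as-is.
 */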
static u64 n4_picl_value(unsigned int nmi_hz)
{
	u32 delta = local_cpu_data().clock_tick / (nmi_hz << 2);

	return ((u64)((0 - delta) & 0xffffffff));
}

static const struct pcr_ops n4_pcr_ops = {
	.read_pcr		= n4_pcr_read,
	.write_pcr		= n4_pcr_write,
	.read_pic		= n4_pic_read,
	.write_pic		= n4_pic_write,
	.nmi_picl_value		= n4_picl_value,
	.pcr_nmi_enable		= (PCR_N4_PICNPT | PCR_N4_STRACE |
				   PCR_N4_UTRACE | PCR_N4_TOE |
				   (26 << PCR_N4_SL_SHIFT)),
	.pcr_nmi_disable	= PCR_N4_PICNPT,
};

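/* SPARC-T5 uses its own hypervisor calls for the PCRs but shares
 * the T4 PIC accessors and NMI period calculation.
 */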
static u64 n5_pcr_read(unsigned long reg_num)
{
	unsigned long val;

	(void) sun4v_t5_get_perfreg(reg_num, &val);

	return val;
}

static void n5_pcr_write(unsigned long reg_num, u64 val)
{
	(void) sun4v_t5_set_perfreg(reg_num, val);
}

static const struct pcr_ops n5_pcr_ops = {
	.read_pcr		= n5_pcr_read,
	.write_pcr		= n5_pcr_write,
	.read_pic		= n4_pic_read,
	.write_pic		= n4_pic_write,
	.nmi_picl_value		= n4_picl_value,
	.pcr_nmi_enable		= (PCR_N4_PICNPT | PCR_N4_STRACE |
				   PCR_N4_UTRACE | PCR_N4_TOE |
				   (26 << PCR_N4_SL_SHIFT)),
	.pcr_nmi_disable	= PCR_N4_PICNPT,
};

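/* SPARC-M7 likewise has dedicated hypervisor calls for the PCRs and
 * reuses the T4 PIC accessors.
 */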
static u64 m7_pcr_read(unsigned long reg_num)
{
	unsigned long val;

	(void) sun4v_m7_get_perfreg(reg_num, &val);

	return val;
}

static void m7_pcr_write(unsigned long reg_num, u64 val)
{
	(void) sun4v_m7_set_perfreg(reg_num, val);
}

static const struct pcr_ops m7_pcr_ops = {
	.read_pcr		= m7_pcr_read,
	.write_pcr		= m7_pcr_write,
	.read_pic		= n4_pic_read,
	.write_pic		= n4_pic_write,
	.nmi_picl_value		= n4_picl_value,
	.pcr_nmi_enable		= (PCR_N4_PICNPT | PCR_N4_STRACE |
				   PCR_N4_UTRACE | PCR_N4_TOE |
				   (26 << PCR_N4_SL_SHIFT)),
	.pcr_nmi_disable	= PCR_N4_PICNPT,
};

static unsigned long perf_hsvc_group;
static unsigned long perf_hsvc_major;
static unsigned long perf_hsvc_minor;

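/* On sun4v, negotiate the chip-specific performance counter
 * hypervisor API group before the hypervisor backed accessors above
 * are used.
 */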
static int __init register_perf_hsvc(void)
{
	unsigned long hverror;

	if (tlb_type == hypervisor) {
		switch (sun4v_chip_type) {
		case SUN4V_CHIP_NIAGARA1:
			perf_hsvc_group = HV_GRP_NIAG_PERF;
			break;

		case SUN4V_CHIP_NIAGARA2:
			perf_hsvc_group = HV_GRP_N2_CPU;
			break;

		case SUN4V_CHIP_NIAGARA3:
			perf_hsvc_group = HV_GRP_KT_CPU;
			break;

		case SUN4V_CHIP_NIAGARA4:
			perf_hsvc_group = HV_GRP_VT_CPU;
			break;

		case SUN4V_CHIP_NIAGARA5:
			perf_hsvc_group = HV_GRP_T5_CPU;
			break;

		case SUN4V_CHIP_SPARC_M7:
			perf_hsvc_group = HV_GRP_M7_PERF;
			break;

		default:
			return -ENODEV;
		}

		perf_hsvc_major = 1;
		perf_hsvc_minor = 0;
		hverror = sun4v_hvapi_register(perf_hsvc_group,
					       perf_hsvc_major,
					       &perf_hsvc_minor);
		if (hverror) {
			pr_err("perfmon: Could not register hvapi(0x%lx).\n",
			       hverror);
			return -ENODEV;
		}
	}
	return 0;
}

static void __init unregister_perf_hsvc(void)
{
	if (tlb_type != hypervisor)
		return;
	sun4v_hvapi_unregister(perf_hsvc_group);
}

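/* Select the pcr_ops variant matching the sun4v cpu we are running
 * on.
 */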
static int __init setup_sun4v_pcr_ops(void)
{
	int ret = 0;

	switch (sun4v_chip_type) {
	case SUN4V_CHIP_NIAGARA1:
	case SUN4V_CHIP_NIAGARA2:
	case SUN4V_CHIP_NIAGARA3:
		pcr_ops = &n2_pcr_ops;
		break;

	case SUN4V_CHIP_NIAGARA4:
		pcr_ops = &n4_pcr_ops;
		break;

	case SUN4V_CHIP_NIAGARA5:
		pcr_ops = &n5_pcr_ops;
		break;

	case SUN4V_CHIP_SPARC_M7:
		pcr_ops = &m7_pcr_ops;
		break;

	default:
		ret = -ENODEV;
		break;
	}

	return ret;
}

int __init pcr_arch_init(void)
{
	int err = register_perf_hsvc();

	if (err)
		return err;

	switch (tlb_type) {
	case hypervisor:
		err = setup_sun4v_pcr_ops();
		if (err)
			goto out_unregister;
		break;

	case cheetah:
	case cheetah_plus:
		pcr_ops = &direct_pcr_ops;
		break;

	case spitfire:
		/* UltraSPARC-I/II and derivatives lack a profile
		 * counter overflow interrupt so we can't make use of
		 * their hardware currently.
		 */
		fallthrough;
	default:
		err = -ENODEV;
		goto out_unregister;
	}

	return nmi_init();

out_unregister:
	unregister_perf_hsvc();
	return err;
}