/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/perf_event.h>
#include <linux/pm_runtime.h>

#include "i915_drv.h"
#include "i915_pmu.h"
#include "intel_ringbuffer.h"

/* Frequency for the sampling timer for events which need it. */
#define FREQUENCY 200
#define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY)

#define ENGINE_SAMPLE_MASK \
	(BIT(I915_SAMPLE_BUSY) | \
	 BIT(I915_SAMPLE_WAIT) | \
	 BIT(I915_SAMPLE_SEMA))

#define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS)

static cpumask_t i915_pmu_cpumask;

static u8 engine_config_sample(u64 config)
{
	return config & I915_PMU_SAMPLE_MASK;
}

static u8 engine_event_sample(struct perf_event *event)
{
	return engine_config_sample(event->attr.config);
}

static u8 engine_event_class(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff;
}

static u8 engine_event_instance(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff;
}

static bool is_engine_config(u64 config)
{
	return config < __I915_PMU_OTHER(0);
}

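/*
 * Each enabled event gets one bit in the pmu.enable bitmask: the engine
 * sample types occupy the low ENGINE_SAMPLE_BITS bits and the remaining
 * (non-engine) events are packed into the bits above them.
 */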
static unsigned int config_enabled_bit(u64 config)
{
	if (is_engine_config(config))
		return engine_config_sample(config);
	else
		return ENGINE_SAMPLE_BITS + (config - __I915_PMU_OTHER(0));
}

static u64 config_enabled_mask(u64 config)
{
	return BIT_ULL(config_enabled_bit(config));
}

static bool is_engine_event(struct perf_event *event)
{
	return is_engine_config(event->attr.config);
}

static unsigned int event_enabled_bit(struct perf_event *event)
{
	return config_enabled_bit(event->attr.config);
}

static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
{
	u64 enable;

	/*
	 * Only some counters need the sampling timer.
	 *
	 * We start with a bitmask of all currently enabled events.
	 */
	enable = i915->pmu.enable;

	/*
	 * Mask out all the ones which do not need the timer, or in
	 * other words keep all the ones that could need the timer.
	 */
	enable &= config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
		  config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY) |
		  ENGINE_SAMPLE_MASK;

	/*
	 * When the GPU is idle per-engine counters do not need to be
	 * running so clear those bits out.
	 */
	if (!gpu_active)
		enable &= ~ENGINE_SAMPLE_MASK;
	/*
	 * Also, when software busyness tracking is available we do not need
	 * the timer for the I915_SAMPLE_BUSY counter.
	 *
	 * Use RCS as a proxy for all engines.
	 */
	else if (intel_engine_supports_stats(i915->engine[RCS]))
		enable &= ~BIT(I915_SAMPLE_BUSY);

	/*
	 * If some bits remain it means we need the sampling timer running.
	 */
	return enable;
}

void i915_pmu_gt_parked(struct drm_i915_private *i915)
{
	if (!i915->pmu.base.event_init)
		return;

	spin_lock_irq(&i915->pmu.lock);
	/*
	 * Signal sampling timer to stop if only engine events are enabled and
	 * GPU went idle.
	 */
	i915->pmu.timer_enabled = pmu_needs_timer(i915, false);
	spin_unlock_irq(&i915->pmu.lock);
}

static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915)
{
	if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) {
		i915->pmu.timer_enabled = true;
		hrtimer_start_range_ns(&i915->pmu.timer,
				       ns_to_ktime(PERIOD), 0,
				       HRTIMER_MODE_REL_PINNED);
	}
}

void i915_pmu_gt_unparked(struct drm_i915_private *i915)
{
	if (!i915->pmu.base.event_init)
		return;

	spin_lock_irq(&i915->pmu.lock);
	/*
	 * Re-enable sampling timer when GPU goes active.
	 */
	__i915_pmu_maybe_start_timer(i915);
	spin_unlock_irq(&i915->pmu.lock);
}

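/*
 * Take forcewake once on behalf of the sampling loop and report it as held,
 * so the caller can release it once after iterating all engines.
 */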
static bool grab_forcewake(struct drm_i915_private *i915, bool fw)
{
	if (!fw)
		intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);

	return true;
}

static void
update_sample(struct i915_pmu_sample *sample, u32 unit, u32 val)
{
	sample->cur += mul_u32_u32(val, unit);
}

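/*
 * Sample the per-engine busy, wait and semaphore signals. Busyness is
 * approximated by comparing the engine's current seqno against the last
 * submitted one, while the wait and semaphore states are taken from the
 * ring control register, read under forcewake.
 */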
static void engines_sample(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	bool fw = false;

	if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
		return;

	if (!dev_priv->gt.awake)
		return;

	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return;

	for_each_engine(engine, dev_priv, id) {
		u32 current_seqno = intel_engine_get_seqno(engine);
		u32 last_seqno = intel_engine_last_submit(engine);
		u32 val;

		val = !i915_seqno_passed(current_seqno, last_seqno);

		update_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
			      PERIOD, val);

		if (val && (engine->pmu.enable &
		    (BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA)))) {
			fw = grab_forcewake(dev_priv, fw);

			val = I915_READ_FW(RING_CTL(engine->mmio_base));
		} else {
			val = 0;
		}

		update_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
			      PERIOD, !!(val & RING_WAIT));

		update_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
			      PERIOD, !!(val & RING_WAIT_SEMAPHORE));
	}

	if (fw)
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	intel_runtime_pm_put(dev_priv);
}

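/*
 * Sample the actual and requested GPU frequencies. The actual (CAGF) value
 * is only read from the hardware while the GT is awake and a runtime PM
 * reference can be taken without waking the device; otherwise the last
 * requested frequency is used as an estimate.
 */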
static void frequency_sample(struct drm_i915_private *dev_priv)
{
	if (dev_priv->pmu.enable &
	    config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
		u32 val;

		val = dev_priv->gt_pm.rps.cur_freq;
		if (dev_priv->gt.awake &&
		    intel_runtime_pm_get_if_in_use(dev_priv)) {
			val = intel_get_cagf(dev_priv,
					     I915_READ_NOTRACE(GEN6_RPSTAT1));
			intel_runtime_pm_put(dev_priv);
		}

		update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
			      1, intel_gpu_freq(dev_priv, val));
	}

	if (dev_priv->pmu.enable &
	    config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
		update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ], 1,
			      intel_gpu_freq(dev_priv,
					     dev_priv->gt_pm.rps.cur_freq));
	}
}

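/*
 * Sampling timer callback. Re-arms itself every PERIOD nanoseconds for as
 * long as at least one enabled event needs software sampling.
 */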
static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
	struct drm_i915_private *i915 =
		container_of(hrtimer, struct drm_i915_private, pmu.timer);

	if (!READ_ONCE(i915->pmu.timer_enabled))
		return HRTIMER_NORESTART;

	engines_sample(i915);
	frequency_sample(i915);

	hrtimer_forward_now(hrtimer, ns_to_ktime(PERIOD));
	return HRTIMER_RESTART;
}

static u64 count_interrupts(struct drm_i915_private *i915)
{
	/* open-coded kstat_irqs() */
	struct irq_desc *desc = irq_to_desc(i915->drm.pdev->irq);
	u64 sum = 0;
	int cpu;

	if (!desc || !desc->kstat_irqs)
		return 0;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);

	return sum;
}

static void i915_pmu_event_destroy(struct perf_event *event)
{
	WARN_ON(event->parent);
}

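/* Check whether an engine sample type is supported on this platform. */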
static int
engine_event_status(struct intel_engine_cs *engine,
		    enum drm_i915_pmu_engine_sample sample)
{
	switch (sample) {
	case I915_SAMPLE_BUSY:
	case I915_SAMPLE_WAIT:
		break;
	case I915_SAMPLE_SEMA:
		if (INTEL_GEN(engine->i915) < 6)
			return -ENODEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

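/* Check whether a non-engine event is supported on this platform. */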
static int
config_status(struct drm_i915_private *i915, u64 config)
{
	switch (config) {
	case I915_PMU_ACTUAL_FREQUENCY:
		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
			/* Requires a mutex for sampling! */
			return -ENODEV;
		/* Fall-through. */
	case I915_PMU_REQUESTED_FREQUENCY:
		if (INTEL_GEN(i915) < 6)
			return -ENODEV;
		break;
	case I915_PMU_INTERRUPTS:
		break;
	case I915_PMU_RC6_RESIDENCY:
		if (!HAS_RC6(i915))
			return -ENODEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

static int engine_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct intel_engine_cs *engine;

	engine = intel_engine_lookup_user(i915, engine_event_class(event),
					  engine_event_instance(event));
	if (!engine)
		return -ENODEV;

	return engine_event_status(engine, engine_event_sample(event));
}

static int i915_pmu_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (event->cpu < 0)
		return -EINVAL;

	/* only allow running on one cpu at a time */
	if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask))
		return -EINVAL;

	if (is_engine_event(event))
		ret = engine_event_init(event);
	else
		ret = config_status(i915, event->attr.config);
	if (ret)
		return ret;

	if (!event->parent)
		event->destroy = i915_pmu_event_destroy;

	return 0;
}

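/*
 * Compute the current value of an event counter: engine busyness prefers the
 * software engine stats when active and falls back to the timer-sampled
 * value, frequency samples are scaled down by the sampling frequency, and
 * interrupt counts and RC6 residency are read directly.
 */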
static u64 __i915_pmu_event_read(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	u64 val = 0;

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		if (WARN_ON_ONCE(!engine)) {
			/* Do nothing */
		} else if (sample == I915_SAMPLE_BUSY &&
			   engine->pmu.busy_stats) {
			val = ktime_to_ns(intel_engine_get_busy_time(engine));
		} else {
			val = engine->pmu.sample[sample].cur;
		}
	} else {
		switch (event->attr.config) {
		case I915_PMU_ACTUAL_FREQUENCY:
			val =
			   div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur,
				   FREQUENCY);
			break;
		case I915_PMU_REQUESTED_FREQUENCY:
			val =
			   div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur,
				   FREQUENCY);
			break;
		case I915_PMU_INTERRUPTS:
			val = count_interrupts(i915);
			break;
		case I915_PMU_RC6_RESIDENCY:
			intel_runtime_pm_get(i915);
			val = intel_rc6_residency_ns(i915,
						     IS_VALLEYVIEW(i915) ?
						     VLV_GT_RENDER_RC6 :
						     GEN6_GT_GFX_RC6);
			if (HAS_RC6p(i915))
				val += intel_rc6_residency_ns(i915,
							      GEN6_GT_GFX_RC6p);
			if (HAS_RC6pp(i915))
				val += intel_rc6_residency_ns(i915,
							      GEN6_GT_GFX_RC6pp);
			intel_runtime_pm_put(i915);
			break;
		}
	}

	return val;
}

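/*
 * Publish the latest counter value to perf. The cmpxchg loop protects
 * against concurrent readers updating prev_count, so each delta is only
 * accounted once.
 */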
static void i915_pmu_event_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;

again:
	prev = local64_read(&hwc->prev_count);
	new = __i915_pmu_event_read(event);

	if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
		goto again;

	local64_add(new - prev, &event->count);
}

static bool engine_needs_busy_stats(struct intel_engine_cs *engine)
{
	return intel_engine_supports_stats(engine) &&
	       (engine->pmu.enable & BIT(I915_SAMPLE_BUSY));
}

static void i915_pmu_enable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	unsigned int bit = event_enabled_bit(event);
	unsigned long flags;

	spin_lock_irqsave(&i915->pmu.lock, flags);

	/*
	 * Update the bitmask of enabled events and increment
	 * the event reference counter.
	 */
	GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
	GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0);
	i915->pmu.enable |= BIT_ULL(bit);
	i915->pmu.enable_count[bit]++;

	/*
	 * Start the sampling timer if needed and not already enabled.
	 */
	__i915_pmu_maybe_start_timer(i915);

	/*
	 * For per-engine events the bitmask and reference counting
	 * is stored per engine.
	 */
	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));
		GEM_BUG_ON(!engine);
		engine->pmu.enable |= BIT(sample);

		GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
		GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
		if (engine->pmu.enable_count[sample]++ == 0) {
			/*
			 * Enable engine busy stats tracking if needed or
			 * alternatively cancel the scheduled disable.
			 *
			 * If the delayed disable was pending, cancel it and
			 * in this case do not enable since it already is.
			 */
			if (engine_needs_busy_stats(engine) &&
			    !engine->pmu.busy_stats) {
				engine->pmu.busy_stats = true;
				if (!cancel_delayed_work(&engine->pmu.disable_busy_stats))
					intel_enable_engine_stats(engine);
			}
		}
	}

	/*
	 * Store the current counter value so we can report the correct delta
	 * for all listeners. Even when the event was already enabled and has
	 * an existing non-zero value.
	 */
	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));

	spin_unlock_irqrestore(&i915->pmu.lock, flags);
}

static void __disable_busy_stats(struct work_struct *work)
{
	struct intel_engine_cs *engine =
	       container_of(work, typeof(*engine), pmu.disable_busy_stats.work);

	intel_disable_engine_stats(engine);
}

static void i915_pmu_disable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	unsigned int bit = event_enabled_bit(event);
	unsigned long flags;

	spin_lock_irqsave(&i915->pmu.lock, flags);

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));
		GEM_BUG_ON(!engine);
		GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
		GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);
		/*
		 * Decrement the reference count and clear the enabled
		 * bitmask when the last listener on an event goes away.
		 */
		if (--engine->pmu.enable_count[sample] == 0) {
			engine->pmu.enable &= ~BIT(sample);
			if (!engine_needs_busy_stats(engine) &&
			    engine->pmu.busy_stats) {
				engine->pmu.busy_stats = false;
				/*
				 * We request a delayed disable to handle the
				 * rapid on/off cycles on events, which can
				 * happen when tools like perf stat start, in a
				 * nicer way.
				 *
				 * In addition, this also helps with busy stats
				 * accuracy with background CPU offline/online
				 * migration events.
				 */
				queue_delayed_work(system_wq,
						   &engine->pmu.disable_busy_stats,
						   round_jiffies_up_relative(HZ));
			}
		}
	}

	GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
	GEM_BUG_ON(i915->pmu.enable_count[bit] == 0);
	/*
	 * Decrement the reference count and clear the enabled
	 * bitmask when the last listener on an event goes away.
	 */
	if (--i915->pmu.enable_count[bit] == 0) {
		i915->pmu.enable &= ~BIT_ULL(bit);
		i915->pmu.timer_enabled &= pmu_needs_timer(i915, true);
	}

	spin_unlock_irqrestore(&i915->pmu.lock, flags);
}

static void i915_pmu_event_start(struct perf_event *event, int flags)
{
	i915_pmu_enable(event);
	event->hw.state = 0;
}

static void i915_pmu_event_stop(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_UPDATE)
		i915_pmu_event_read(event);
	i915_pmu_disable(event);
	event->hw.state = PERF_HES_STOPPED;
}

static int i915_pmu_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		i915_pmu_event_start(event, flags);

	return 0;
}

static void i915_pmu_event_del(struct perf_event *event, int flags)
{
	i915_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int i915_pmu_event_event_idx(struct perf_event *event)
{
	return 0;
}

struct i915_str_attribute {
	struct device_attribute attr;
	const char *str;
};

static ssize_t i915_pmu_format_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct i915_str_attribute *eattr;

	eattr = container_of(attr, struct i915_str_attribute, attr);
	return sprintf(buf, "%s\n", eattr->str);
}

#define I915_PMU_FORMAT_ATTR(_name, _config) \
	(&((struct i915_str_attribute[]) { \
		{ .attr = __ATTR(_name, 0444, i915_pmu_format_show, NULL), \
		  .str = _config, } \
	})[0].attr.attr)

static struct attribute *i915_pmu_format_attrs[] = {
	I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-20"),
	NULL,
};

static const struct attribute_group i915_pmu_format_attr_group = {
	.name = "format",
	.attrs = i915_pmu_format_attrs,
};

struct i915_ext_attribute {
	struct device_attribute attr;
	unsigned long val;
};

static ssize_t i915_pmu_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct i915_ext_attribute *eattr;

	eattr = container_of(attr, struct i915_ext_attribute, attr);
	return sprintf(buf, "config=0x%lx\n", eattr->val);
}

static struct attribute_group i915_pmu_events_attr_group = {
	.name = "events",
	/* Patch in attrs at runtime. */
};

static ssize_t
i915_pmu_get_attr_cpumask(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
}

static DEVICE_ATTR(cpumask, 0444, i915_pmu_get_attr_cpumask, NULL);

static struct attribute *i915_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group i915_pmu_cpumask_attr_group = {
	.attrs = i915_cpumask_attrs,
};

static const struct attribute_group *i915_pmu_attr_groups[] = {
	&i915_pmu_format_attr_group,
	&i915_pmu_events_attr_group,
	&i915_pmu_cpumask_attr_group,
	NULL
};

#define __event(__config, __name, __unit) \
{ \
	.config = (__config), \
	.name = (__name), \
	.unit = (__unit), \
}

#define __engine_event(__sample, __name) \
{ \
	.sample = (__sample), \
	.name = (__name), \
}

static struct i915_ext_attribute *
add_i915_attr(struct i915_ext_attribute *attr, const char *name, u64 config)
{
	sysfs_attr_init(&attr->attr.attr);
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = i915_pmu_event_show;
	attr->val = config;

	return ++attr;
}

static struct perf_pmu_events_attr *
add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name,
	     const char *str)
{
	sysfs_attr_init(&attr->attr.attr);
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = perf_event_sysfs_show;
	attr->event_str = str;

	return ++attr;
}

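/*
 * Build the sysfs "events" attributes at runtime, exposing only the counters
 * which config_status() and engine_event_status() report as supported on
 * this device, plus a matching ".unit" attribute where a unit is defined.
 */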
static struct attribute **
create_event_attributes(struct drm_i915_private *i915)
{
	static const struct {
		u64 config;
		const char *name;
		const char *unit;
	} events[] = {
		__event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "MHz"),
		__event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "MHz"),
		__event(I915_PMU_INTERRUPTS, "interrupts", NULL),
		__event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
	};
	static const struct {
		enum drm_i915_pmu_engine_sample sample;
		char *name;
	} engine_events[] = {
		__engine_event(I915_SAMPLE_BUSY, "busy"),
		__engine_event(I915_SAMPLE_SEMA, "sema"),
		__engine_event(I915_SAMPLE_WAIT, "wait"),
	};
	unsigned int count = 0;
	struct perf_pmu_events_attr *pmu_attr = NULL, *pmu_iter;
	struct i915_ext_attribute *i915_attr = NULL, *i915_iter;
	struct attribute **attr = NULL, **attr_iter;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int i;

	/* Count how many counters we will be exposing. */
	for (i = 0; i < ARRAY_SIZE(events); i++) {
		if (!config_status(i915, events[i].config))
			count++;
	}

	for_each_engine(engine, i915, id) {
		for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
			if (!engine_event_status(engine,
						 engine_events[i].sample))
				count++;
		}
	}

	/* Allocate attribute objects and table. */
	i915_attr = kzalloc(count * sizeof(*i915_attr), GFP_KERNEL);
	if (!i915_attr)
		goto err_alloc;

	pmu_attr = kzalloc(count * sizeof(*pmu_attr), GFP_KERNEL);
	if (!pmu_attr)
		goto err_alloc;

	/* Max one pointer of each attribute type plus a termination entry. */
	attr = kzalloc((count * 2 + 1) * sizeof(attr), GFP_KERNEL);
	if (!attr)
		goto err_alloc;

	i915_iter = i915_attr;
	pmu_iter = pmu_attr;
	attr_iter = attr;

	/* Initialize supported non-engine counters. */
	for (i = 0; i < ARRAY_SIZE(events); i++) {
		char *str;

		if (config_status(i915, events[i].config))
			continue;

		str = kstrdup(events[i].name, GFP_KERNEL);
		if (!str)
			goto err;

		*attr_iter++ = &i915_iter->attr.attr;
		i915_iter = add_i915_attr(i915_iter, str, events[i].config);

		if (events[i].unit) {
			str = kasprintf(GFP_KERNEL, "%s.unit", events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &pmu_iter->attr.attr;
			pmu_iter = add_pmu_attr(pmu_iter, str, events[i].unit);
		}
	}

	/* Initialize supported engine counters. */
	for_each_engine(engine, i915, id) {
		for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
			char *str;

			if (engine_event_status(engine,
						engine_events[i].sample))
				continue;

			str = kasprintf(GFP_KERNEL, "%s-%s",
					engine->name, engine_events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &i915_iter->attr.attr;
			i915_iter =
				add_i915_attr(i915_iter, str,
					      __I915_PMU_ENGINE(engine->class,
								engine->instance,
								engine_events[i].sample));

			str = kasprintf(GFP_KERNEL, "%s-%s.unit",
					engine->name, engine_events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &pmu_iter->attr.attr;
			pmu_iter = add_pmu_attr(pmu_iter, str, "ns");
		}
	}

	i915->pmu.i915_attr = i915_attr;
	i915->pmu.pmu_attr = pmu_attr;

	return attr;

err:;
	for (attr_iter = attr; *attr_iter; attr_iter++)
		kfree((*attr_iter)->name);

err_alloc:
	kfree(attr);
	kfree(i915_attr);
	kfree(pmu_attr);

	return NULL;
}

static void free_event_attributes(struct drm_i915_private *i915)
{
	struct attribute **attr_iter = i915_pmu_events_attr_group.attrs;

	for (; *attr_iter; attr_iter++)
		kfree((*attr_iter)->name);

	kfree(i915_pmu_events_attr_group.attrs);
	kfree(i915->pmu.i915_attr);
	kfree(i915->pmu.pmu_attr);

	i915_pmu_events_attr_group.attrs = NULL;
	i915->pmu.i915_attr = NULL;
	i915->pmu.pmu_attr = NULL;
}

static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);

	GEM_BUG_ON(!pmu->base.event_init);

	/* Select the first online CPU as a designated reader. */
	if (!cpumask_weight(&i915_pmu_cpumask))
		cpumask_set_cpu(cpu, &i915_pmu_cpumask);

	return 0;
}

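/*
 * If the designated reader CPU goes offline, hand the role to one of its
 * topology siblings and migrate the perf context there.
 */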
static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
	unsigned int target;

	GEM_BUG_ON(!pmu->base.event_init);

	if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &i915_pmu_cpumask);
			perf_pmu_migrate_context(&pmu->base, cpu, target);
		}
	}

	return 0;
}

static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;

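/*
 * Register a dynamic CPU hotplug state so the designated reader CPU is
 * tracked across online/offline transitions.
 */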
static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915)
{
	enum cpuhp_state slot;
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/x86/intel/i915:online",
				      i915_pmu_cpu_online,
				      i915_pmu_cpu_offline);
	if (ret < 0)
		return ret;

	slot = ret;
	ret = cpuhp_state_add_instance(slot, &i915->pmu.node);
	if (ret) {
		cpuhp_remove_multi_state(slot);
		return ret;
	}

	cpuhp_slot = slot;
	return 0;
}

static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915)
{
	WARN_ON(cpuhp_slot == CPUHP_INVALID);
	WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &i915->pmu.node));
	cpuhp_remove_multi_state(cpuhp_slot);
}

void i915_pmu_register(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret;

	if (INTEL_GEN(i915) <= 2) {
		DRM_INFO("PMU not supported for this GPU.");
		return;
	}

	i915_pmu_events_attr_group.attrs = create_event_attributes(i915);
	if (!i915_pmu_events_attr_group.attrs) {
		ret = -ENOMEM;
		goto err;
	}

	i915->pmu.base.attr_groups = i915_pmu_attr_groups;
	i915->pmu.base.task_ctx_nr = perf_invalid_context;
	i915->pmu.base.event_init = i915_pmu_event_init;
	i915->pmu.base.add = i915_pmu_event_add;
	i915->pmu.base.del = i915_pmu_event_del;
	i915->pmu.base.start = i915_pmu_event_start;
	i915->pmu.base.stop = i915_pmu_event_stop;
	i915->pmu.base.read = i915_pmu_event_read;
	i915->pmu.base.event_idx = i915_pmu_event_event_idx;

	spin_lock_init(&i915->pmu.lock);
	hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	i915->pmu.timer.function = i915_sample;

	for_each_engine(engine, i915, id)
		INIT_DELAYED_WORK(&engine->pmu.disable_busy_stats,
				  __disable_busy_stats);

	ret = perf_pmu_register(&i915->pmu.base, "i915", -1);
	if (ret)
		goto err;

	ret = i915_pmu_register_cpuhp_state(i915);
	if (ret)
		goto err_unreg;

	return;

err_unreg:
	perf_pmu_unregister(&i915->pmu.base);
err:
	i915->pmu.base.event_init = NULL;
	free_event_attributes(i915);
	DRM_NOTE("Failed to register PMU! (err=%d)\n", ret);
}

void i915_pmu_unregister(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (!i915->pmu.base.event_init)
		return;

	WARN_ON(i915->pmu.enable);

	hrtimer_cancel(&i915->pmu.timer);

	for_each_engine(engine, i915, id) {
		GEM_BUG_ON(engine->pmu.busy_stats);
		flush_delayed_work(&engine->pmu.disable_busy_stats);
	}

	i915_pmu_unregister_cpuhp_state(i915);

	perf_pmu_unregister(&i915->pmu.base);
	i915->pmu.base.event_init = NULL;
	free_event_attributes(i915);
}