/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *   Robert Bragg <robert@sixbynine.org>
 */


/**
 * DOC: i915 Perf Overview
 *
 * Gen graphics supports a large number of performance counters that can help
 * driver and application developers understand and optimize their use of the
 * GPU.
 *
 * This i915 perf interface enables userspace to configure and open a file
 * descriptor representing a stream of GPU metrics which can then be read() as
 * a stream of sample records.
 *
 * The interface is particularly suited to exposing buffered metrics that are
 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
 *
 * Streams representing a single context are accessible to applications with a
 * corresponding drm file descriptor, such that OpenGL can use the interface
 * without special privileges. Access to system-wide metrics requires root
 * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
 * sysctl option.
 *
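 * As a rough usage sketch (an illustration, not code from this file; it
 * assumes the uAPI declared in uapi/drm/i915_drm.h and a hypothetical
 * metrics_set_id read from sysfs), userspace might open a stream with::
 *
 *	__u64 properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(properties) / (2 * sizeof(__u64)),
 *		.properties_ptr = (__u64)(uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 * and each read() of stream_fd then yields a sequence of records, each
 * framed by a struct drm_i915_perf_record_header.
 *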
 */

/**
 * DOC: i915 Perf History and Comparison with Core Perf
 *
 * The interface was initially inspired by the core Perf infrastructure but
 * some notable differences are:
 *
 * i915 perf file descriptors represent a "stream" instead of an "event"; where
 * a perf event primarily corresponds to a single 64bit value, while a stream
 * might sample sets of tightly-coupled counters, depending on the
 * configuration. For example the Gen OA unit isn't designed to support
 * orthogonal configurations of individual counters; it's configured for a set
 * of related counters. Samples for an i915 perf stream capturing OA metrics
 * will include a set of counter values packed in a compact HW specific format.
 * The OA unit supports a number of different packing formats which can be
 * selected by the user opening the stream. Perf has support for grouping
 * events, but each event in the group is configured, validated and
 * authenticated individually with separate system calls.
 *
 * i915 perf stream configurations are provided as an array of u64 (key,value)
 * pairs, instead of a fixed struct with multiple miscellaneous config members,
 * interleaved with event-type specific members.
 *
 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
 * The supported metrics are being written to memory by the GPU unsynchronized
 * with the CPU, using HW specific packing formats for counter sets. Sometimes
 * the constraints on HW configuration require reports to be filtered before it
 * would be acceptable to expose them to unprivileged applications - to hide
 * the metrics of other processes/contexts. For these use cases a read() based
 * interface is a good fit, and provides an opportunity to filter data as it
 * gets copied from the GPU mapped buffers to userspace buffers.
 *
 *
 * Issues hit with first prototype based on Core Perf
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The first prototype of this driver was based on the core perf
 * infrastructure, and while we did make that mostly work, with some changes to
 * perf, we found we were breaking or working around too many assumptions baked
 * into perf's current CPU-centric design.
 *
 * In the end we didn't see a clear benefit to making perf's implementation and
 * interface more complex by changing design assumptions while we knew we still
 * wouldn't be able to use any existing perf based userspace tools.
 *
 * Also considering the Gen specific nature of the Observability hardware and
 * how userspace will sometimes need to combine i915 perf OA metrics with
 * side-band OA data captured via MI_REPORT_PERF_COUNT commands; we're
 * expecting the interface to be used by a platform specific userspace such as
 * OpenGL or tools. This is to say: we aren't inherently missing out on having
 * a standard vendor/architecture agnostic interface by not using perf.
 *
 *
 * For posterity, in case we might re-visit trying to adapt core perf to be
 * better suited to exposing i915 metrics these were the main pain points we
 * hit:
 *
 * - The perf based OA PMU driver broke some significant design assumptions:
 *
 *   Existing perf pmus are used for profiling work on a cpu and we were
 *   introducing the idea of _IS_DEVICE pmus with different security
 *   implications, the need to fake cpu-related data (such as user/kernel
 *   registers) to fit with perf's current design, and adding _DEVICE records
 *   as a way to forward device-specific status records.
 *
 *   The OA unit writes reports of counters into a circular buffer, without
 *   involvement from the CPU, making our PMU driver the first of its kind.
 *
 *   Given the way we were periodically forwarding data from the GPU-mapped OA
 *   buffer to perf's buffer, those bursts of sample writes looked to perf like
 *   we were sampling too fast and so we had to subvert its throttling checks.
 *
 *   Perf supports groups of counters and allows those to be read via
 *   transactions internally but transactions currently seem designed to be
 *   explicitly initiated from the cpu (say in response to a userspace read())
 *   and while we could pull a report out of the OA buffer we can't
 *   trigger a report from the cpu on demand.
 *
 *   Related to being report based; the OA counters are configured in HW as a
 *   set while perf generally expects counter configurations to be orthogonal.
 *   Although counters can be associated with a group leader as they are
 *   opened, there's no clear precedent for being able to provide group-wide
 *   configuration attributes (for example we want to let userspace choose the
 *   OA unit report format used to capture all counters in a set, or specify a
 *   GPU context to filter metrics on). We avoided using perf's grouping
 *   feature and forwarded OA reports to userspace via perf's 'raw' sample
 *   field. This suited our userspace well considering how coupled the counters
 *   are when dealing with normalizing. It would be inconvenient to split
 *   counters up into separate events, only to require userspace to recombine
 *   them. For Mesa it's also convenient to be forwarded raw, periodic reports
 *   for combining with the side-band raw reports it captures using
 *   MI_REPORT_PERF_COUNT commands.
 *
 * - As a side note on perf's grouping feature; there was also some concern
 *   that using PERF_FORMAT_GROUP as a way to pack together counter values
 *   would quite drastically inflate our sample sizes, which would likely
 *   lower the effective sampling resolutions we could use when the available
 *   memory bandwidth is limited.
 *
 *   With the OA unit's report formats, counters are packed together as 32
 *   or 40bit values, with the largest report size being 256 bytes.
 *
 *   PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
 *   documented ordering to the values, implying PERF_FORMAT_ID must also be
 *   used to add a 64bit ID before each value; giving 16 bytes per counter.
 *
 *   Related to counter orthogonality; we can't time share the OA unit, while
 *   event scheduling is a central design idea within perf for allowing
 *   userspace to open + enable more events than can be configured in HW at any
 *   one time. The OA unit is not designed to allow re-configuration while in
 *   use. We can't reconfigure the OA unit without losing internal OA unit
 *   state which we can't access explicitly to save and restore. Reconfiguring
 *   the OA unit is also relatively slow, involving ~100 register writes. From
 *   userspace Mesa also depends on a stable OA configuration when emitting
 *   MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
 *   disabled while there are outstanding MI_RPC commands lest we hang the
 *   command streamer.
 *
 *   The contents of sample records aren't extensible by device drivers (i.e.
 *   the sample_type bits). As an example, Sourab Gupta had been looking to
 *   attach GPU timestamps to our OA samples. We were shoehorning OA reports
 *   into sample records by using the 'raw' field, but it's tricky to pack more
 *   than one thing into this field because events/core.c currently only lets a
 *   pmu give a single raw data pointer plus len which will be copied into the
 *   ring buffer. To include more than the OA report we'd have to copy the
 *   report into an intermediate larger buffer. I'd been considering allowing a
 *   vector of data+len values to be specified for copying the raw data, but
 *   it felt like a kludge to be using the raw field for this purpose.
 *
 * - It felt like our perf based PMU was making some technical compromises
 *   just for the sake of using perf:
 *
 *   perf_event_open() requires events to either relate to a pid or a specific
 *   cpu core, while our device pmu related to neither. Events opened with a
 *   pid will be automatically enabled/disabled according to the scheduling of
 *   that process - so not appropriate for us. When an event is related to a
 *   cpu id, perf ensures pmu methods will be invoked via an inter-processor
 *   interrupt on that core. To avoid invasive changes our userspace opened OA
 *   perf events for a specific cpu. This was workable but it meant the
 *   majority of the OA driver ran in atomic context, including all OA report
 *   forwarding, which wasn't really necessary in our case and made our
 *   locking requirements somewhat complex as we handled the interaction
 *   with the rest of the i915 driver.
 */

#include <linux/anon_inodes.h>
#include <linux/sizes.h>
#include <linux/uuid.h>

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_lrc_reg.h"

#include "i915_drv.h"
#include "i915_perf.h"
#include "oa/i915_oa_hsw.h"
#include "oa/i915_oa_bdw.h"
#include "oa/i915_oa_chv.h"
#include "oa/i915_oa_sklgt2.h"
#include "oa/i915_oa_sklgt3.h"
#include "oa/i915_oa_sklgt4.h"
#include "oa/i915_oa_bxt.h"
#include "oa/i915_oa_kblgt2.h"
#include "oa/i915_oa_kblgt3.h"
#include "oa/i915_oa_glk.h"
#include "oa/i915_oa_cflgt2.h"
#include "oa/i915_oa_cflgt3.h"
#include "oa/i915_oa_cnl.h"
#include "oa/i915_oa_icl.h"

/* HW requires this to be a power of two, between 128k and 16M, though driver
 * is currently generally designed assuming the largest 16M size is used such
 * that the overflow cases are unlikely in normal operation.
 */
#define OA_BUFFER_SIZE		SZ_16M

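/* Bytes of report data available between @head and @tail; the mask keeps the
 * subtraction correct when the pointers wrap through zero. As a worked
 * example (not from the original source): with the 16M buffer above,
 * tail = 0x100 and head = 0xfffff80 gives
 * (0x100 - 0xfffff80) & 0xffffff == 0x180 bytes available.
 */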
#define OA_TAKEN(tail, head)	((tail - head) & (OA_BUFFER_SIZE - 1))

/**
 * DOC: OA Tail Pointer Race
 *
 * There's a HW race condition between OA unit tail pointer register updates and
 * writes to memory whereby the tail pointer can sometimes get ahead of what's
 * been written out to the OA buffer so far (in terms of what's visible to the
 * CPU).
 *
 * Although this can be observed explicitly while copying reports to userspace
 * by checking for a zeroed report-id field in tail reports, we want to account
 * for this earlier, as part of the oa_buffer_check to avoid lots of redundant
 * read() attempts.
 *
 * In effect we define a tail pointer for reading that lags the real tail
 * pointer by at least %OA_TAIL_MARGIN_NSEC nanoseconds, which gives enough
 * time for the corresponding reports to become visible to the CPU.
 *
 * To manage this we actually track two tail pointers:
 *  1) An 'aging' tail with an associated timestamp that is tracked until we
 *     can trust the corresponding data is visible to the CPU; at which point
 *     it is considered 'aged'.
 *  2) An 'aged' tail that can be used for read()ing.
 *
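 * As a rough illustration of that scheme (a sketch, not literal code from
 * this driver), across successive hrtimer callbacks::
 *
 *	T0: hw tail advances	-> tails[!aged_idx] = hw_tail, start aging
 *	T1: margin not yet met	-> no change
 *	T2: now - aging_timestamp > OA_TAIL_MARGIN_NSEC
 *				-> aged_tail_idx ^= 1, and read()s may now
 *				   consume reports up to the newly aged tail
 *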
 * The two separate pointers let us decouple read()s from tail pointer aging.
 *
 * The tail pointers are checked and updated at a limited rate within a hrtimer
 * callback (the same callback that is used for delivering EPOLLIN events).
 *
 * Initially the tails are marked invalid with %INVALID_TAIL_PTR which
 * indicates that an updated tail pointer is needed.
 *
 * Most of the implementation details for this workaround are in
 * oa_buffer_check_unlocked() and _append_oa_reports()
 *
 * Note for posterity: previously the driver used to define an effective tail
 * pointer that lagged the real pointer by a 'tail margin' measured in bytes
 * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
 * This was flawed considering that the OA unit may also automatically generate
 * non-periodic reports (such as on context switch) or the OA unit may be
 * enabled without any periodic sampling.
 */
#define OA_TAIL_MARGIN_NSEC	100000ULL
#define INVALID_TAIL_PTR	0xffffffff

/* frequency for checking whether the OA unit has written new reports to the
 * circular OA buffer...
 */
#define POLL_FREQUENCY 200
#define POLL_PERIOD (NSEC_PER_SEC / POLL_FREQUENCY)

/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
static u32 i915_perf_stream_paranoid = true;

/* The maximum exponent the hardware accepts is 63 (essentially it selects one
 * of the 64bit timestamp bits to trigger reports from) but there's currently
 * no known use case for sampling as infrequently as once per 47 thousand years.
 *
 * Since the timestamps included in OA reports are only 32bits it seems
 * reasonable to limit the OA exponent where it's still possible to account for
 * overflow in OA report timestamps.
 */
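/* As a sketch of the relationship (assuming Haswell's ~12.5MHz command
 * streamer timestamp frequency; other Gens differ), the period programmed
 * via an exponent works out to roughly:
 *
 *   period_ns = (2 << exponent) * NSEC_PER_SEC / timestamp_freq_hz
 *
 * i.e. ~160ns at exponent 0 and ~343 seconds at OA_EXPONENT_MAX below.
 */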
#define OA_EXPONENT_MAX 31

#define INVALID_CTX_ID 0xffffffff

/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK           0x3f
#define OAREPORT_REASON_SHIFT          19
#define OAREPORT_REASON_TIMER          (1<<0)
#define OAREPORT_REASON_CTX_SWITCH     (1<<3)
#define OAREPORT_REASON_CLK_RATIO      (1<<5)


/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
 *
 * The highest sampling frequency we can theoretically program the OA unit
 * with is always half the timestamp frequency: e.g. 6.25MHz for Haswell.
 *
 * Initialized just before we register the sysctl parameter.
 */
static int oa_sample_rate_hard_limit;

/* Theoretically we can program the OA unit to sample every 160ns but don't
 * allow that by default unless root...
 *
 * The default threshold of 100000Hz is based on perf's similar
 * kernel.perf_event_max_sample_rate sysctl parameter.
 */
static u32 i915_oa_max_sample_rate = 100000;

/* XXX: beware if future OA HW adds new report formats: the current code
 * assumes all reports have a power-of-two size and that ~(size - 1) can
 * be used as a mask to align the OA tail pointer.
 */
static const struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A13]	    = { 0, 64 },
	[I915_OA_FORMAT_A29]	    = { 1, 128 },
	[I915_OA_FORMAT_A13_B8_C8]  = { 2, 128 },
	/* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
	[I915_OA_FORMAT_B4_C8]	    = { 4, 64 },
	[I915_OA_FORMAT_A45_B8_C8]  = { 5, 256 },
	[I915_OA_FORMAT_B4_C8_A16]  = { 6, 128 },
	[I915_OA_FORMAT_C4_B8]	    = { 7, 64 },
};

static const struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A12]		    = { 0, 64 },
	[I915_OA_FORMAT_A12_B8_C8]	    = { 2, 128 },
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
	[I915_OA_FORMAT_C4_B8]		    = { 7, 64 },
};

#define SAMPLE_OA_REPORT      (1<<0)

/**
 * struct perf_open_properties - for validated properties given to open a stream
 * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
 * @single_context: Whether a single or all gpu contexts should be monitored
 * @ctx_handle: A gem ctx handle for use with @single_context
 * @metrics_set: An ID for an OA unit metric set advertised via sysfs
 * @oa_format: An OA unit HW report format
 * @oa_periodic: Whether to enable periodic OA unit sampling
 * @oa_period_exponent: The OA unit sampling period is derived from this
 *
 * As read_properties_unlocked() enumerates and validates the properties given
 * to open a stream of metrics the configuration is built up in the structure
 * which starts out zero initialized.
 */
struct perf_open_properties {
	u32 sample_flags;

	u64 single_context:1;
	u64 ctx_handle;

	/* OA sampling state */
	int metrics_set;
	int oa_format;
	bool oa_periodic;
	int oa_period_exponent;
};

static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer);

static void free_oa_config(struct drm_i915_private *dev_priv,
			   struct i915_oa_config *oa_config)
{
	if (!PTR_ERR(oa_config->flex_regs))
		kfree(oa_config->flex_regs);
	if (!PTR_ERR(oa_config->b_counter_regs))
		kfree(oa_config->b_counter_regs);
	if (!PTR_ERR(oa_config->mux_regs))
		kfree(oa_config->mux_regs);
	kfree(oa_config);
}

static void put_oa_config(struct drm_i915_private *dev_priv,
			  struct i915_oa_config *oa_config)
{
	if (!atomic_dec_and_test(&oa_config->ref_count))
		return;

	free_oa_config(dev_priv, oa_config);
}

static int get_oa_config(struct drm_i915_private *dev_priv,
			 int metrics_set,
			 struct i915_oa_config **out_config)
{
	int ret;

	if (metrics_set == 1) {
		*out_config = &dev_priv->perf.test_config;
		atomic_inc(&dev_priv->perf.test_config.ref_count);
		return 0;
	}

	ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
	if (ret)
		return ret;

	*out_config = idr_find(&dev_priv->perf.metrics_idr, metrics_set);
	if (!*out_config)
		ret = -EINVAL;
	else
		atomic_inc(&(*out_config)->ref_count);

	mutex_unlock(&dev_priv->perf.metrics_lock);

	return ret;
}

static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	return I915_READ(GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}

static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	u32 oastatus1 = I915_READ(GEN7_OASTATUS1);

	return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}

/**
 * oa_buffer_check_unlocked - check for data and update tail ptr state
 * @stream: i915 stream instance
 *
 * This is either called via fops (for blocking reads in user ctx) or the poll
 * check hrtimer (atomic ctx) to check the OA buffer tail pointer and check
 * if there is data available for userspace to read.
 *
 * This function is central to providing a workaround for the OA unit tail
 * pointer having a race with respect to what data is visible to the CPU.
 * It is responsible for reading tail pointers from the hardware and giving
 * the pointers time to 'age' before they are made available for reading.
 * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
 *
 * Besides returning true when there is data available to read() this function
 * also has the side effect of updating the oa_buffer.tails[], .aging_timestamp
 * and .aged_tail_idx state used for reading.
 *
 * Note: It's safe to read OA config state here unlocked, assuming that this is
 * only called while the stream is enabled, while the global OA configuration
 * can't be modified.
 *
 * Returns: %true if the OA buffer contains data, else %false
 */
static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	int report_size = stream->oa_buffer.format_size;
	unsigned long flags;
	unsigned int aged_idx;
	u32 head, hw_tail, aged_tail, aging_tail;
	u64 now;

	/* We have to consider the (unlikely) possibility that read() errors
	 * could result in an OA buffer reset which might reset the head,
	 * tails[] and aged_tail state.
	 */
	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	/* NB: The head we observe here might effectively be a little out of
	 * date (between head and tails[aged_idx].offset) if there is currently
	 * a read() in progress.
	 */
	head = stream->oa_buffer.head;

	aged_idx = stream->oa_buffer.aged_tail_idx;
	aged_tail = stream->oa_buffer.tails[aged_idx].offset;
	aging_tail = stream->oa_buffer.tails[!aged_idx].offset;

	hw_tail = dev_priv->perf.ops.oa_hw_tail_read(stream);

	/* The tail pointer increases in 64 byte increments,
	 * not in report_size steps...
	 */
	hw_tail &= ~(report_size - 1);

	now = ktime_get_mono_fast_ns();

	/* Update the aged tail
	 *
	 * Flip the tail pointer available for read()s once the aging tail is
	 * old enough to trust that the corresponding data will be visible to
	 * the CPU...
	 *
	 * Do this before updating the aging pointer in case we may be able to
	 * immediately start aging a new pointer too (if new data has become
	 * available) without needing to wait for a later hrtimer callback.
	 */
	if (aging_tail != INVALID_TAIL_PTR &&
	    ((now - stream->oa_buffer.aging_timestamp) >
	     OA_TAIL_MARGIN_NSEC)) {

		aged_idx ^= 1;
		stream->oa_buffer.aged_tail_idx = aged_idx;

		aged_tail = aging_tail;

		/* Mark that we need a new pointer to start aging... */
		stream->oa_buffer.tails[!aged_idx].offset = INVALID_TAIL_PTR;
		aging_tail = INVALID_TAIL_PTR;
	}

	/* Update the aging tail
	 *
	 * We throttle aging tail updates until we have a new tail that
	 * represents >= one report more data than is already available for
	 * reading. This ensures there will be enough data for a successful
	 * read once this new pointer has aged and ensures we will give the new
	 * pointer time to age.
	 */
	if (aging_tail == INVALID_TAIL_PTR &&
	    (aged_tail == INVALID_TAIL_PTR ||
	     OA_TAKEN(hw_tail, aged_tail) >= report_size)) {
		struct i915_vma *vma = stream->oa_buffer.vma;
		u32 gtt_offset = i915_ggtt_offset(vma);

		/* Be paranoid and do a bounds check on the pointer read back
		 * from hardware, just in case some spurious hardware condition
		 * could put the tail out of bounds...
		 */
		if (hw_tail >= gtt_offset &&
		    hw_tail < (gtt_offset + OA_BUFFER_SIZE)) {
			stream->oa_buffer.tails[!aged_idx].offset =
				aging_tail = hw_tail;
			stream->oa_buffer.aging_timestamp = now;
		} else {
			DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %u\n",
				  hw_tail);
		}
	}

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	return aged_tail == INVALID_TAIL_PTR ?
	       false : OA_TAKEN(aged_tail, head) >= report_size;
}

/**
 * append_oa_status - Appends a status record to a userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @type: The kind of status to report to userspace
 *
 * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
 * into the userspace read() buffer.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_status(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    enum drm_i915_perf_record_type type)
{
	struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };

	if ((count - *offset) < header.size)
		return -ENOSPC;

	if (copy_to_user(buf + *offset, &header, sizeof(header)))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}

/**
 * append_oa_sample - Copies single OA report into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @report: A single OA report to (optionally) include as part of the sample
 *
 * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
 * properties when opening a stream, tracked as `stream->sample_flags`. This
 * function copies the requested components of a single sample to the given
 * read() @buf.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_sample(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    const u8 *report)
{
	int report_size = stream->oa_buffer.format_size;
	struct drm_i915_perf_record_header header;
	u32 sample_flags = stream->sample_flags;

	header.type = DRM_I915_PERF_RECORD_SAMPLE;
	header.pad = 0;
	header.size = stream->sample_size;

	if ((count - *offset) < header.size)
		return -ENOSPC;

	buf += *offset;
	if (copy_to_user(buf, &header, sizeof(header)))
		return -EFAULT;
	buf += sizeof(header);

	if (sample_flags & SAMPLE_OA_REPORT) {
		if (copy_to_user(buf, report, report_size))
			return -EFAULT;
	}

	(*offset) += header.size;

	return 0;
}

/**
 * gen8_append_oa_reports - Copies all buffered OA reports into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen8_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	int report_size = stream->oa_buffer.format_size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	unsigned int aged_tail_idx;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (WARN_ON(!stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	aged_tail_idx = stream->oa_buffer.aged_tail_idx;
	tail = stream->oa_buffer.tails[aged_tail_idx].offset;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/*
	 * An invalid tail pointer here means we're still waiting for the poll
	 * hrtimer callback to give us a pointer
	 */
	if (tail == INVALID_TAIL_PTR)
		return -EAGAIN;

	/*
	 * NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/*
	 * An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
		      tail > OA_BUFFER_SIZE || tail % report_size,
		      "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
		      head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;
		u32 ctx_id;
		u32 reason;

		/*
		 * All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
			DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/*
		 * The reason field includes flags identifying what
		 * triggered this specific report (mostly timer
		 * triggered or e.g. due to a context switch).
		 *
		 * This field is never expected to be zero so we can
		 * check that the report isn't invalid before copying
		 * it to userspace...
		 */
		reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
			  OAREPORT_REASON_MASK);
		if (reason == 0) {
			if (__ratelimit(&dev_priv->perf.spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		ctx_id = report32[2] & stream->specific_ctx_id_mask;

		/*
		 * Squash whatever is in the CTX_ID field if it's marked as
		 * invalid to be sure we avoid false-positive, single-context
		 * filtering below...
		 *
		 * Note that we don't clear the valid_ctx_bit so userspace can
		 * understand that the ID has been squashed by the kernel.
		 */
		if (!(report32[0] & dev_priv->perf.gen8_valid_ctx_bit))
			ctx_id = report32[2] = INVALID_CTX_ID;

		/*
		 * NB: For Gen 8 the OA unit no longer supports clock gating
		 * off for a specific context and the kernel can't securely
		 * stop the counters from updating as system-wide / global
		 * values.
		 *
		 * Automatic reports now include a context ID so reports can be
		 * filtered on the cpu but it's not worth trying to
		 * automatically subtract/hide counter progress for other
		 * contexts while filtering since we can't stop userspace
		 * issuing MI_REPORT_PERF_COUNT commands which would still
		 * provide a side-band view of the real values.
		 *
		 * To allow userspace (such as Mesa/GL_INTEL_performance_query)
		 * to normalize counters for a single filtered context then it
		 * needs to be forwarded bookend context-switch reports so that
		 * it can track switches in between MI_REPORT_PERF_COUNT
		 * commands and can itself subtract/ignore the progress of
		 * counters associated with other contexts. Note that the
		 * hardware automatically triggers reports when switching to a
		 * new context which are tagged with the ID of the newly active
		 * context. To avoid the complexity (and likely fragility) of
		 * reading ahead while parsing reports to try and minimize
		 * forwarding redundant context switch reports (i.e. between
		 * other, unrelated contexts) we simply elect to forward them
		 * all.
		 *
		 * We don't rely solely on the reason field to identify context
		 * switches since it's not uncommon for periodic samples to
		 * identify a switch before any 'context switch' report.
		 */
		if (!dev_priv->perf.exclusive_stream->ctx ||
		    stream->specific_ctx_id == ctx_id ||
		    stream->oa_buffer.last_ctx_id == stream->specific_ctx_id ||
		    reason & OAREPORT_REASON_CTX_SWITCH) {

			/*
			 * While filtering for a single context we avoid
			 * leaking the IDs of other contexts.
			 */
			if (dev_priv->perf.exclusive_stream->ctx &&
			    stream->specific_ctx_id != ctx_id) {
				report32[2] = INVALID_CTX_ID;
			}

			ret = append_oa_sample(stream, buf, count, offset,
					       report);
			if (ret)
				break;

			stream->oa_buffer.last_ctx_id = ctx_id;
		}

		/*
		 * The above reason field sanity check is based on
		 * the assumption that the OA buffer is initially
		 * zeroed and we reset the field after copying so the
		 * check is still meaningful once old reports start
		 * being overwritten.
		 */
		report32[0] = 0;
	}

	if (start_offset != *offset) {
		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/*
		 * We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;

		I915_WRITE(GEN8_OAHEADPTR, head & GEN8_OAHEADPTR_MASK);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen8_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks OA unit status registers and if necessary appends corresponding
 * status records for userspace (such as for a buffer full condition) and then
 * initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * NB: some data may be successfully copied to the userspace buffer
 * even if an error is returned, and this is reflected in the
 * updated @offset.
 *
 * Returns: zero on success or a negative error code
 */
static int gen8_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	u32 oastatus;
	int ret;

	if (WARN_ON(!stream->oa_buffer.vaddr))
		return -EIO;

	oastatus = I915_READ(GEN8_OASTATUS);

	/*
	 * We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * Although theoretically we could handle this more gracefully
	 * sometimes, some Gens don't correctly suppress certain
	 * automatically triggered reports in this condition and so we
	 * have to assume that old reports are now being trampled
	 * over.
	 *
	 * Considering how we don't currently give userspace control
	 * over the OA buffer size and always configure a large 16MB
	 * buffer, then a buffer overflow does anyway likely indicate
	 * that something has gone quite badly wrong.
	 */
	if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  stream->period_exponent);

		dev_priv->perf.ops.oa_disable(stream);
		dev_priv->perf.ops.oa_enable(stream);

		/*
		 * Note: .oa_enable() is expected to re-init the oabuffer and
		 * reset GEN8_OASTATUS for us
		 */
		oastatus = I915_READ(GEN8_OASTATUS);
	}

	if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		I915_WRITE(GEN8_OASTATUS,
			   oastatus & ~GEN8_OASTATUS_REPORT_LOST);
	}

	return gen8_append_oa_reports(stream, buf, count, offset);
}

/**
 * gen7_append_oa_reports - Copies all buffered OA reports into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen7_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	int report_size = stream->oa_buffer.format_size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	unsigned int aged_tail_idx;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (WARN_ON(!stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	aged_tail_idx = stream->oa_buffer.aged_tail_idx;
	tail = stream->oa_buffer.tails[aged_tail_idx].offset;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/* An invalid tail pointer here means we're still waiting for the poll
	 * hrtimer callback to give us a pointer
	 */
	if (tail == INVALID_TAIL_PTR)
		return -EAGAIN;

	/* NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/* An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
		      tail > OA_BUFFER_SIZE || tail % report_size,
		      "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
		      head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;

		/* All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
			DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/* The report-ID field for periodic samples includes
		 * some undocumented flags related to what triggered
		 * the report and is never expected to be zero so we
		 * can check that the report isn't invalid before
		 * copying it to userspace...
		 */
		if (report32[0] == 0) {
			if (__ratelimit(&dev_priv->perf.spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		ret = append_oa_sample(stream, buf, count, offset, report);
		if (ret)
			break;

		/* The above report-id field sanity check is based on
		 * the assumption that the OA buffer is initially
		 * zeroed and we reset the field after copying so the
		 * check is still meaningful once old reports start
		 * being overwritten.
		 */
		report32[0] = 0;
	}

	if (start_offset != *offset) {
		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/* We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;

		I915_WRITE(GEN7_OASTATUS2,
			   ((head & GEN7_OASTATUS2_HEAD_MASK) |
			    GEN7_OASTATUS2_MEM_SELECT_GGTT));
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen7_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks Gen 7 specific OA unit status registers and if necessary appends
 * corresponding status records for userspace (such as for a buffer full
 * condition) and then initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int gen7_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	u32 oastatus1;
	int ret;

	if (WARN_ON(!stream->oa_buffer.vaddr))
		return -EIO;

	oastatus1 = I915_READ(GEN7_OASTATUS1);

	/* XXX: On Haswell we don't have a safe way to clear oastatus1
	 * bits while the OA unit is enabled (while the tail pointer
	 * may be updated asynchronously) so we ignore status bits
	 * that have already been reported to userspace.
	 */
	oastatus1 &= ~dev_priv->perf.gen7_latched_oastatus1;

	/* We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * - The status can be interpreted to mean that the buffer is
	 *   currently full (with a higher precedence than OA_TAKEN()
	 *   which will start to report a near-empty buffer after an
	 *   overflow) but it's awkward that we can't clear the status
	 *   on Haswell, so without a reset we won't be able to catch
	 *   the state again.
	 *
	 * - Since it also implies the HW has started overwriting old
	 *   reports it may also affect our sanity checks for invalid
	 *   reports when copying to userspace that assume new reports
	 *   are being written to cleared memory.
	 *
	 * - In the future we may want to introduce a flight recorder
	 *   mode where the driver will automatically maintain a safe
	 *   guard band between head/tail, avoiding this overflow
	 *   condition, but we avoid the added driver complexity for
	 *   now.
	 */
	if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  stream->period_exponent);

		dev_priv->perf.ops.oa_disable(stream);
		dev_priv->perf.ops.oa_enable(stream);

		oastatus1 = I915_READ(GEN7_OASTATUS1);
	}

	if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		dev_priv->perf.gen7_latched_oastatus1 |=
			GEN7_OASTATUS1_REPORT_LOST;
	}

	return gen7_append_oa_reports(stream, buf, count, offset);
}

/**
 * i915_oa_wait_unlocked - handles blocking IO until OA data available
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Called when userspace tries to read() from a blocking stream FD opened
 * for OA metrics. It waits until the hrtimer callback finds a non-empty
 * OA buffer and wakes us.
 *
 * Note: it's acceptable to have this return with some false positives
 * since any subsequent read handling will return -EAGAIN if there isn't
 * really data ready for userspace yet.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
{
	/* We would wait indefinitely if periodic sampling is not enabled */
	if (!stream->periodic)
		return -EIO;

	return wait_event_interruptible(stream->poll_wq,
					oa_buffer_check_unlocked(stream));
}

/**
 * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
 * @stream: An i915-perf stream opened for OA metrics
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream opened for OA metrics,
 * this starts a poll_wait with the wait queue that our hrtimer callback wakes
 * when it sees data ready to read in the circular OA buffer.
 */
static void i915_oa_poll_wait(struct i915_perf_stream *stream,
			      struct file *file,
			      poll_table *wait)
{
	poll_wait(file, &stream->poll_wq, wait);
}

Robert Bragg16d98b32016-12-07 21:40:33 +00001182/**
1183 * i915_oa_read - just calls through to &i915_oa_ops->read
1184 * @stream: An i915-perf stream opened for OA metrics
1185 * @buf: destination buffer given by userspace
1186 * @count: the number of bytes userspace wants to read
1187 * @offset: (inout): the current position for writing into @buf
1188 *
1189 * Updates @offset according to the number of bytes successfully copied into
1190 * the userspace buffer.
1191 *
1192 * Returns: zero on success or a negative error code
1193 */
Robert Braggd7965152016-11-07 19:49:52 +00001194static int i915_oa_read(struct i915_perf_stream *stream,
1195 char __user *buf,
1196 size_t count,
1197 size_t *offset)
1198{
1199 struct drm_i915_private *dev_priv = stream->dev_priv;
1200
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001201 return dev_priv->perf.ops.read(stream, buf, count, offset);
Robert Braggd7965152016-11-07 19:49:52 +00001202}
1203
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001204static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
Lionel Landwerlin61d56762018-06-02 12:29:46 +01001205{
Chris Wilson5e2a0412019-04-26 17:33:34 +01001206 struct i915_gem_engines_iter it;
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001207 struct i915_gem_context *ctx = stream->ctx;
Lionel Landwerlin61d56762018-06-02 12:29:46 +01001208 struct intel_context *ce;
Chris Wilsonfa9f6682019-04-26 17:33:29 +01001209 int err;
Lionel Landwerlin61d56762018-06-02 12:29:46 +01001210
Chris Wilson5e2a0412019-04-26 17:33:34 +01001211 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
1212 if (ce->engine->class != RENDER_CLASS)
1213 continue;
Lionel Landwerlin61d56762018-06-02 12:29:46 +01001214
Chris Wilson5e2a0412019-04-26 17:33:34 +01001215 /*
1216		 * As the ID is the gtt offset of the context's vma, we
1217 * pin the vma to ensure the ID remains fixed.
1218 */
1219 err = intel_context_pin(ce);
1220 if (err == 0) {
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001221 stream->pinned_ctx = ce;
Chris Wilson5e2a0412019-04-26 17:33:34 +01001222 break;
1223 }
1224 }
1225 i915_gem_context_unlock_engines(ctx);
1226
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001227 return stream->pinned_ctx;
Lionel Landwerlin61d56762018-06-02 12:29:46 +01001228}
1229
Robert Bragg16d98b32016-12-07 21:40:33 +00001230/**
1231 * oa_get_render_ctx_id - determine and hold ctx hw id
1232 * @stream: An i915-perf stream opened for OA metrics
1233 *
1234 * Determine the render context hw id, and ensure it remains fixed for the
Robert Braggd7965152016-11-07 19:49:52 +00001235 * lifetime of the stream. This ensures that we don't have to worry about
1236 * updating the context ID in OACONTROL on the fly.
Robert Bragg16d98b32016-12-07 21:40:33 +00001237 *
1238 * Returns: zero on success or a negative error code
Robert Braggd7965152016-11-07 19:49:52 +00001239 */
1240static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
1241{
Lionel Landwerlin61d56762018-06-02 12:29:46 +01001242 struct drm_i915_private *i915 = stream->dev_priv;
1243 struct intel_context *ce;
Robert Braggd7965152016-11-07 19:49:52 +00001244
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001245 ce = oa_pin_context(stream);
Lionel Landwerlin61d56762018-06-02 12:29:46 +01001246 if (IS_ERR(ce))
1247 return PTR_ERR(ce);
Robert Braggd7965152016-11-07 19:49:52 +00001248
Lionel Landwerlin61d56762018-06-02 12:29:46 +01001249 switch (INTEL_GEN(i915)) {
1250 case 7: {
Robert Bragg19f81df2017-06-13 12:23:03 +01001251 /*
Lionel Landwerlin61d56762018-06-02 12:29:46 +01001252 * On Haswell we don't do any post processing of the reports
1253 * and don't need to use the mask.
Robert Bragg19f81df2017-06-13 12:23:03 +01001254 */
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001255 stream->specific_ctx_id = i915_ggtt_offset(ce->state);
1256 stream->specific_ctx_id_mask = 0;
Lionel Landwerlin61d56762018-06-02 12:29:46 +01001257 break;
Robert Bragg19f81df2017-06-13 12:23:03 +01001258 }
Robert Braggd7965152016-11-07 19:49:52 +00001259
Lionel Landwerlin61d56762018-06-02 12:29:46 +01001260 case 8:
1261 case 9:
1262 case 10:
1263 if (USES_GUC_SUBMISSION(i915)) {
1264 /*
1265 * When using GuC, the context descriptor we write in
1266 * i915 is read by GuC and rewritten before it's
1267 * actually written into the hardware. The LRCA is
1268 * what is put into the context id field of the
1269 * context descriptor by GuC. Because it's aligned to
1270			 * a page, the lower 12 bits are always 0 and are
1271			 * dropped by GuC. They won't be part of the context
1272 * ID in the OA reports, so squash those lower bits.
1273 */
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001274 stream->specific_ctx_id =
Lionel Landwerlin61d56762018-06-02 12:29:46 +01001275 lower_32_bits(ce->lrc_desc) >> 12;
1276
1277 /*
1278 * GuC uses the top bit to signal proxy submission, so
1279 * ignore that bit.
1280 */
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001281 stream->specific_ctx_id_mask =
Lionel Landwerlin61d56762018-06-02 12:29:46 +01001282 (1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
1283 } else {
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001284 stream->specific_ctx_id_mask =
Lionel Landwerlin61d56762018-06-02 12:29:46 +01001285 (1U << GEN8_CTX_ID_WIDTH) - 1;
Chris Wilson2935ed52019-10-04 14:40:08 +01001286 stream->specific_ctx_id = stream->specific_ctx_id_mask;
Lionel Landwerlin61d56762018-06-02 12:29:46 +01001287 }
1288 break;
1289
Michel Thierry45e9c822019-08-23 01:20:50 -07001290 case 11:
1291 case 12: {
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001292 stream->specific_ctx_id_mask =
Chris Wilson2935ed52019-10-04 14:40:08 +01001293 ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
1294 stream->specific_ctx_id = stream->specific_ctx_id_mask;
Lionel Landwerlin61d56762018-06-02 12:29:46 +01001295 break;
1296 }
1297
1298 default:
1299 MISSING_CASE(INTEL_GEN(i915));
1300 }
1301
Chris Wilson2935ed52019-10-04 14:40:08 +01001302 ce->tag = stream->specific_ctx_id_mask;
1303
Lionel Landwerlin61d56762018-06-02 12:29:46 +01001304 DRM_DEBUG_DRIVER("filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001305 stream->specific_ctx_id,
1306 stream->specific_ctx_id_mask);
Lionel Landwerlin61d56762018-06-02 12:29:46 +01001307
Chris Wilson266a2402017-05-04 10:33:08 +01001308 return 0;
Robert Braggd7965152016-11-07 19:49:52 +00001309}
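
/*
 * A minimal sketch (not driver code) of how the (id, mask) pair captured
 * above gets used: when filtering OA reports on the CPU, a report is
 * attributed to our context iff its ctx_id field matches under the mask.
 * The helper name is hypothetical:
 *
 *	static bool oa_report_ctx_matches(const struct i915_perf_stream *stream,
 *					  u32 report_ctx_id)
 *	{
 *		return (report_ctx_id & stream->specific_ctx_id_mask) ==
 *		       (stream->specific_ctx_id & stream->specific_ctx_id_mask);
 *	}
 */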
1310
Robert Bragg16d98b32016-12-07 21:40:33 +00001311/**
1312 * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold
1313 * @stream: An i915-perf stream opened for OA metrics
1314 *
1315 * Anything that was done in oa_get_render_ctx_id() to keep the context HW
1316 * ID valid for the lifetime of the stream is undone here.
1317 */
Robert Braggd7965152016-11-07 19:49:52 +00001318static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
1319{
Chris Wilson1fc44d92018-05-17 22:26:32 +01001320 struct intel_context *ce;
Robert Braggd7965152016-11-07 19:49:52 +00001321
Chris Wilson2935ed52019-10-04 14:40:08 +01001322 ce = fetch_and_zero(&stream->pinned_ctx);
1323 if (ce) {
1324 ce->tag = 0; /* recomputed on next submission after parking */
1325 intel_context_unpin(ce);
1326 }
1327
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001328 stream->specific_ctx_id = INVALID_CTX_ID;
1329 stream->specific_ctx_id_mask = 0;
Robert Braggd7965152016-11-07 19:49:52 +00001330}
1331
1332static void
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001333free_oa_buffer(struct i915_perf_stream *stream)
Robert Braggd7965152016-11-07 19:49:52 +00001334{
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001335 i915_vma_unpin_and_release(&stream->oa_buffer.vma,
Chris Wilson6a2f59e2018-07-21 13:50:37 +01001336 I915_VMA_RELEASE_MAP);
Robert Braggd7965152016-11-07 19:49:52 +00001337
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001338 stream->oa_buffer.vaddr = NULL;
Robert Braggd7965152016-11-07 19:49:52 +00001339}
1340
1341static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
1342{
1343 struct drm_i915_private *dev_priv = stream->dev_priv;
1344
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001345 BUG_ON(stream != dev_priv->perf.exclusive_stream);
Robert Braggd7965152016-11-07 19:49:52 +00001346
Robert Bragg19f81df2017-06-13 12:23:03 +01001347 /*
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01001348	 * Unset exclusive_stream first; it will be checked while disabling
1349 * the metric set on gen8+.
Robert Bragg19f81df2017-06-13 12:23:03 +01001350 */
Lionel Landwerlin701f8232017-08-03 17:58:08 +01001351 mutex_lock(&dev_priv->drm.struct_mutex);
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001352 dev_priv->perf.exclusive_stream = NULL;
1353 dev_priv->perf.ops.disable_metric_set(stream);
Lionel Landwerlin41d3fdc2018-03-01 11:06:13 +00001354 mutex_unlock(&dev_priv->drm.struct_mutex);
Robert Braggd7965152016-11-07 19:49:52 +00001355
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001356 free_oa_buffer(stream);
Robert Braggd7965152016-11-07 19:49:52 +00001357
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07001358 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
Daniele Ceraolo Spuriod858d562019-06-13 16:21:54 -07001359 intel_runtime_pm_put(&dev_priv->runtime_pm, stream->wakeref);
Robert Braggd7965152016-11-07 19:49:52 +00001360
1361 if (stream->ctx)
1362 oa_put_render_ctx_id(stream);
1363
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01001364 put_oa_config(dev_priv, stream->oa_config);
1365
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001366 if (dev_priv->perf.spurious_report_rs.missed) {
Robert Bragg712122e2017-05-11 16:43:31 +01001367 DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001368 dev_priv->perf.spurious_report_rs.missed);
Robert Bragg712122e2017-05-11 16:43:31 +01001369 }
Robert Braggd7965152016-11-07 19:49:52 +00001370}
1371
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001372static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
Robert Braggd7965152016-11-07 19:49:52 +00001373{
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001374 struct drm_i915_private *dev_priv = stream->dev_priv;
1375 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
Robert Bragg0dd860c2017-05-11 16:43:28 +01001376 unsigned long flags;
1377
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001378 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
Robert Braggd7965152016-11-07 19:49:52 +00001379
1380 /* Pre-DevBDW: OABUFFER must be set with counters off,
1381 * before OASTATUS1, but after OASTATUS2
1382 */
Lionel Landwerlinb82ed432018-03-26 10:08:26 +01001383 I915_WRITE(GEN7_OASTATUS2,
1384 gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT); /* head */
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001385 stream->oa_buffer.head = gtt_offset;
Robert Braggf2790202017-05-11 16:43:26 +01001386
Robert Braggd7965152016-11-07 19:49:52 +00001387 I915_WRITE(GEN7_OABUFFER, gtt_offset);
Robert Braggf2790202017-05-11 16:43:26 +01001388
Joonas Lahtinenfe841682018-11-16 15:55:09 +02001389 I915_WRITE(GEN7_OASTATUS1, gtt_offset | OABUFFER_SIZE_16M); /* tail */
Robert Braggd7965152016-11-07 19:49:52 +00001390
Robert Bragg0dd860c2017-05-11 16:43:28 +01001391 /* Mark that we need updated tail pointers to read from... */
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001392 stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
1393 stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
Robert Bragg0dd860c2017-05-11 16:43:28 +01001394
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001395 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
Robert Bragg0dd860c2017-05-11 16:43:28 +01001396
Robert Braggd7965152016-11-07 19:49:52 +00001397 /* On Haswell we have to track which OASTATUS1 flags we've
1398 * already seen since they can't be cleared while periodic
1399 * sampling is enabled.
1400 */
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001401 dev_priv->perf.gen7_latched_oastatus1 = 0;
Robert Braggd7965152016-11-07 19:49:52 +00001402
1403 /* NB: although the OA buffer will initially be allocated
1404 * zeroed via shmfs (and so this memset is redundant when
1405 * first allocating), we may re-init the OA buffer, either
1406 * when re-enabling a stream or in error/reset paths.
1407 *
1408 * The reason we clear the buffer for each re-init is for the
1409 * sanity check in gen7_append_oa_reports() that looks at the
1410	 * report-id field to make sure it's non-zero, which relies on
1411 * the assumption that new reports are being written to zeroed
1412 * memory...
1413 */
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001414 memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
Robert Braggd7965152016-11-07 19:49:52 +00001415
1416	/* The OA buffer was just (re)initialized, so there's nothing to
1417	 * read yet; the hrtimer callback will set ->pollin once data lands.
1418	 */
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001419 stream->pollin = false;
Robert Braggd7965152016-11-07 19:49:52 +00001420}
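
/*
 * A sketch (assumed helper, not driver code) of the pointer arithmetic the
 * state above supports: because OA_BUFFER_SIZE is a power of two, the
 * amount of unread data can be derived with a single mask, even across
 * buffer wrap-around:
 *
 *	static u32 oa_unread_bytes(u32 head, u32 tail)
 *	{
 *		// head/tail share the same ggtt base, so it cancels out
 *		return (tail - head) & (OA_BUFFER_SIZE - 1);
 *	}
 */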
1421
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001422static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
Robert Bragg19f81df2017-06-13 12:23:03 +01001423{
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001424 struct drm_i915_private *dev_priv = stream->dev_priv;
1425 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
Robert Bragg19f81df2017-06-13 12:23:03 +01001426 unsigned long flags;
1427
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001428 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
Robert Bragg19f81df2017-06-13 12:23:03 +01001429
1430 I915_WRITE(GEN8_OASTATUS, 0);
1431 I915_WRITE(GEN8_OAHEADPTR, gtt_offset);
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001432 stream->oa_buffer.head = gtt_offset;
Robert Bragg19f81df2017-06-13 12:23:03 +01001433
1434 I915_WRITE(GEN8_OABUFFER_UDW, 0);
1435
1436 /*
1437 * PRM says:
1438 *
1439 * "This MMIO must be set before the OATAILPTR
1440 * register and after the OAHEADPTR register. This is
1441 * to enable proper functionality of the overflow
1442 * bit."
1443 */
1444 I915_WRITE(GEN8_OABUFFER, gtt_offset |
Joonas Lahtinenfe841682018-11-16 15:55:09 +02001445 OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
Robert Bragg19f81df2017-06-13 12:23:03 +01001446 I915_WRITE(GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
1447
1448 /* Mark that we need updated tail pointers to read from... */
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001449 stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
1450 stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
Robert Bragg19f81df2017-06-13 12:23:03 +01001451
1452 /*
1453 * Reset state used to recognise context switches, affecting which
1454 * reports we will forward to userspace while filtering for a single
1455 * context.
1456 */
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001457 stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;
Robert Bragg19f81df2017-06-13 12:23:03 +01001458
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001459 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
Robert Bragg19f81df2017-06-13 12:23:03 +01001460
1461 /*
1462 * NB: although the OA buffer will initially be allocated
1463 * zeroed via shmfs (and so this memset is redundant when
1464 * first allocating), we may re-init the OA buffer, either
1465 * when re-enabling a stream or in error/reset paths.
1466 *
1467 * The reason we clear the buffer for each re-init is for the
1468 * sanity check in gen8_append_oa_reports() that looks at the
1469	 * reason field to make sure it's non-zero, which relies on
1470 * the assumption that new reports are being written to zeroed
1471 * memory...
1472 */
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001473 memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
Robert Bragg19f81df2017-06-13 12:23:03 +01001474
1475 /*
1476	 * The OA buffer was just (re)initialized, so there's nothing to read
1477	 * yet; the hrtimer callback will set ->pollin once data lands.
1478 */
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001479 stream->pollin = false;
Robert Bragg19f81df2017-06-13 12:23:03 +01001480}
1481
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001482static int alloc_oa_buffer(struct i915_perf_stream *stream)
Robert Braggd7965152016-11-07 19:49:52 +00001483{
1484 struct drm_i915_gem_object *bo;
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001485 struct drm_i915_private *dev_priv = stream->dev_priv;
Robert Braggd7965152016-11-07 19:49:52 +00001486 struct i915_vma *vma;
1487 int ret;
1488
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001489 if (WARN_ON(stream->oa_buffer.vma))
Robert Braggd7965152016-11-07 19:49:52 +00001490 return -ENODEV;
1491
Joonas Lahtinenfe841682018-11-16 15:55:09 +02001492 BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
1493 BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);
1494
Chris Wilson84753552019-05-28 10:29:45 +01001495 bo = i915_gem_object_create_shmem(dev_priv, OA_BUFFER_SIZE);
Robert Braggd7965152016-11-07 19:49:52 +00001496 if (IS_ERR(bo)) {
1497 DRM_ERROR("Failed to allocate OA buffer\n");
Chris Wilson28507482019-10-04 14:39:58 +01001498 return PTR_ERR(bo);
Robert Braggd7965152016-11-07 19:49:52 +00001499 }
1500
Chris Wilsona679f582019-03-21 16:19:07 +00001501 i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);
Robert Braggd7965152016-11-07 19:49:52 +00001502
1503 /* PreHSW required 512K alignment, HSW requires 16M */
1504 vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
1505 if (IS_ERR(vma)) {
1506 ret = PTR_ERR(vma);
1507 goto err_unref;
1508 }
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001509 stream->oa_buffer.vma = vma;
Robert Braggd7965152016-11-07 19:49:52 +00001510
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001511 stream->oa_buffer.vaddr =
Robert Braggd7965152016-11-07 19:49:52 +00001512 i915_gem_object_pin_map(bo, I915_MAP_WB);
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001513 if (IS_ERR(stream->oa_buffer.vaddr)) {
1514 ret = PTR_ERR(stream->oa_buffer.vaddr);
Robert Braggd7965152016-11-07 19:49:52 +00001515 goto err_unpin;
1516 }
1517
Joonas Lahtinenfe841682018-11-16 15:55:09 +02001518 DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n",
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001519 i915_ggtt_offset(stream->oa_buffer.vma),
1520 stream->oa_buffer.vaddr);
Robert Braggd7965152016-11-07 19:49:52 +00001521
Chris Wilson28507482019-10-04 14:39:58 +01001522 return 0;
Robert Braggd7965152016-11-07 19:49:52 +00001523
1524err_unpin:
1525 __i915_vma_unpin(vma);
1526
1527err_unref:
1528 i915_gem_object_put(bo);
1529
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001530 stream->oa_buffer.vaddr = NULL;
1531 stream->oa_buffer.vma = NULL;
Robert Braggd7965152016-11-07 19:49:52 +00001532
Robert Braggd7965152016-11-07 19:49:52 +00001533 return ret;
1534}
1535
1536static void config_oa_regs(struct drm_i915_private *dev_priv,
1537 const struct i915_oa_reg *regs,
Lionel Landwerlin701f8232017-08-03 17:58:08 +01001538 u32 n_regs)
Robert Braggd7965152016-11-07 19:49:52 +00001539{
Lionel Landwerlin701f8232017-08-03 17:58:08 +01001540 u32 i;
Robert Braggd7965152016-11-07 19:49:52 +00001541
1542 for (i = 0; i < n_regs; i++) {
1543 const struct i915_oa_reg *reg = regs + i;
1544
1545 I915_WRITE(reg->addr, reg->value);
1546 }
1547}
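
/*
 * For illustration (hypothetical register values, not a real metric set):
 * a config is just an ordered table of (register, value) writes, so a
 * caller would do something like:
 *
 *	static const struct i915_oa_reg example_mux_regs[] = {
 *		{ _MMIO(0x9888), 0x198b0000 },
 *		{ _MMIO(0x9888), 0x078b0066 },
 *	};
 *
 *	config_oa_regs(dev_priv, example_mux_regs, ARRAY_SIZE(example_mux_regs));
 */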
1548
Lionel Landwerlin14bfcd32019-07-10 11:55:24 +01001549static void delay_after_mux(void)
Robert Braggd7965152016-11-07 19:49:52 +00001550{
Lionel Landwerlin14bfcd32019-07-10 11:55:24 +01001551 /*
1552 * It apparently takes a fairly long time for a new MUX
Robert Braggd7965152016-11-07 19:49:52 +00001553	 * configuration to be applied after these register writes.
1554 * This delay duration was derived empirically based on the
1555 * render_basic config but hopefully it covers the maximum
1556 * configuration latency.
1557 *
1558 * As a fallback, the checks in _append_oa_reports() to skip
1559	 * invalid OA reports also seem to discard reports
1560 * generated before this config has completed - albeit not
1561 * silently.
1562 *
1563 * Unfortunately this is essentially a magic number, since we
1564 * don't currently know of a reliable mechanism for predicting
1565	 * how long the MUX config will take to apply and, besides
1566	 * seeing invalid reports, we don't know of a reliable way to
1567 * explicitly check that the MUX config has landed.
1568 *
1569	 * It's even possible we've mischaracterized the underlying
1570	 * problem - it just seems like the simplest explanation of why
1571 * a delay at this location would mitigate any invalid reports.
1572 */
1573 usleep_range(15000, 20000);
Lionel Landwerlin14bfcd32019-07-10 11:55:24 +01001574}
1575
1576static int hsw_enable_metric_set(struct i915_perf_stream *stream)
1577{
1578 struct drm_i915_private *dev_priv = stream->dev_priv;
1579 const struct i915_oa_config *oa_config = stream->oa_config;
1580
1581 /*
1582 * PRM:
1583 *
1584 * OA unit is using “crclk” for its functionality. When trunk
1585 * level clock gating takes place, OA clock would be gated,
1586 * unable to count the events from non-render clock domain.
1587 * Render clock gating must be disabled when OA is enabled to
1588 * count the events from non-render domain. Unit level clock
1589 * gating for RCS should also be disabled.
1590 */
1591 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
1592 ~GEN7_DOP_CLOCK_GATE_ENABLE));
1593 I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
1594 GEN6_CSUNIT_CLOCK_GATE_DISABLE));
1595
1596 config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
1597 delay_after_mux();
Robert Braggd7965152016-11-07 19:49:52 +00001598
Lionel Landwerlin701f8232017-08-03 17:58:08 +01001599 config_oa_regs(dev_priv, oa_config->b_counter_regs,
1600 oa_config->b_counter_regs_len);
Robert Braggd7965152016-11-07 19:49:52 +00001601
1602 return 0;
1603}
1604
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001605static void hsw_disable_metric_set(struct i915_perf_stream *stream)
Robert Braggd7965152016-11-07 19:49:52 +00001606{
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001607 struct drm_i915_private *dev_priv = stream->dev_priv;
1608
Robert Braggd7965152016-11-07 19:49:52 +00001609 I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) &
1610 ~GEN6_CSUNIT_CLOCK_GATE_DISABLE));
1611 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) |
1612 GEN7_DOP_CLOCK_GATE_ENABLE));
1613
1614 I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
1615 ~GT_NOA_ENABLE));
1616}
1617
Chris Wilsona9877da2019-07-16 22:34:43 +01001618static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config,
1619 i915_reg_t reg)
1620{
1621 u32 mmio = i915_mmio_reg_offset(reg);
1622 int i;
1623
1624 /*
1625 * This arbitrary default will select the 'EU FPU0 Pipeline
1626 * Active' event. In the future it's anticipated that there
1627 * will be an explicit 'No Event' we can select, but not yet...
1628 */
1629 if (!oa_config)
1630 return 0;
1631
1632 for (i = 0; i < oa_config->flex_regs_len; i++) {
1633 if (i915_mmio_reg_offset(oa_config->flex_regs[i].addr) == mmio)
1634 return oa_config->flex_regs[i].value;
1635 }
1636
1637 return 0;
1638}
Robert Bragg19f81df2017-06-13 12:23:03 +01001639/*
1640 * NB: It must always remain pointer safe to run this even if the OA unit
1641 * has been disabled.
1642 *
1643 * It's fine to put out-of-date values into these per-context registers
1644 * in the case that the OA unit has been disabled.
1645 */
Chris Wilsonb146e5e2019-03-06 08:47:04 +00001646static void
Chris Wilson7dc56af2019-09-24 15:59:50 +01001647gen8_update_reg_state_unlocked(const struct intel_context *ce,
1648 const struct i915_perf_stream *stream)
Robert Bragg19f81df2017-06-13 12:23:03 +01001649{
Chris Wilsoncb0c43f2019-07-30 17:34:41 +01001650 struct drm_i915_private *i915 = ce->engine->i915;
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001651 u32 ctx_oactxctrl = i915->perf.ctx_oactxctrl_offset;
1652 u32 ctx_flexeu0 = i915->perf.ctx_flexeu0_offset;
Robert Bragg19f81df2017-06-13 12:23:03 +01001653 /* The MMIO offsets for Flex EU registers aren't contiguous */
Lionel Landwerlin35ab4fd2018-08-13 09:02:18 +01001654 i915_reg_t flex_regs[] = {
1655 EU_PERF_CNTL0,
1656 EU_PERF_CNTL1,
1657 EU_PERF_CNTL2,
1658 EU_PERF_CNTL3,
1659 EU_PERF_CNTL4,
1660 EU_PERF_CNTL5,
1661 EU_PERF_CNTL6,
Robert Bragg19f81df2017-06-13 12:23:03 +01001662 };
Chris Wilson7dc56af2019-09-24 15:59:50 +01001663 u32 *reg_state = ce->lrc_reg_state;
Robert Bragg19f81df2017-06-13 12:23:03 +01001664 int i;
1665
Chris Wilson7dc56af2019-09-24 15:59:50 +01001666 reg_state[ctx_oactxctrl + 1] =
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001667 (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
1668 (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
Chris Wilson7dc56af2019-09-24 15:59:50 +01001669 GEN8_OA_COUNTER_RESUME;
Robert Bragg19f81df2017-06-13 12:23:03 +01001670
Chris Wilson7dc56af2019-09-24 15:59:50 +01001671 for (i = 0; i < ARRAY_SIZE(flex_regs); i++)
1672 reg_state[ctx_flexeu0 + i * 2 + 1] =
1673 oa_config_flex_reg(stream->oa_config, flex_regs[i]);
Lionel Landwerlinec431ea2019-02-05 09:50:29 +00001674
Chris Wilson7dc56af2019-09-24 15:59:50 +01001675 reg_state[CTX_R_PWR_CLK_STATE] = intel_sseu_make_rpcs(i915, &ce->sseu);
Robert Bragg19f81df2017-06-13 12:23:03 +01001676}
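
/*
 * Layout note for the indexing above: the register state context stores
 * (MMIO offset, value) dword pairs after each MI_LOAD_REGISTER_IMM header,
 * and ctx_oactxctrl/ctx_flexeu0 point at the offset slot of their pairs,
 * so "+ 1" (and "i * 2 + 1" for the flex range) addresses the value slot:
 *
 *	reg_state[ctx_oactxctrl + 0] == i915_mmio_reg_offset(GEN8_OACTXCONTROL)
 *	reg_state[ctx_oactxctrl + 1] == the OACTXCONTROL value written above
 */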
1677
Chris Wilsona9877da2019-07-16 22:34:43 +01001678struct flex {
1679 i915_reg_t reg;
1680 u32 offset;
1681 u32 value;
1682};
1683
1684static int
1685gen8_store_flex(struct i915_request *rq,
1686 struct intel_context *ce,
1687 const struct flex *flex, unsigned int count)
1688{
1689 u32 offset;
1690 u32 *cs;
1691
1692 cs = intel_ring_begin(rq, 4 * count);
1693 if (IS_ERR(cs))
1694 return PTR_ERR(cs);
1695
1696 offset = i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
1697 do {
1698 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
Chris Wilson7dc56af2019-09-24 15:59:50 +01001699 *cs++ = offset + flex->offset * sizeof(u32);
Chris Wilsona9877da2019-07-16 22:34:43 +01001700 *cs++ = 0;
1701 *cs++ = flex->value;
1702 } while (flex++, --count);
1703
1704 intel_ring_advance(rq, cs);
1705
1706 return 0;
1707}
1708
1709static int
1710gen8_load_flex(struct i915_request *rq,
1711 struct intel_context *ce,
1712 const struct flex *flex, unsigned int count)
1713{
1714 u32 *cs;
1715
1716 GEM_BUG_ON(!count || count > 63);
1717
1718 cs = intel_ring_begin(rq, 2 * count + 2);
1719 if (IS_ERR(cs))
1720 return PTR_ERR(cs);
1721
1722 *cs++ = MI_LOAD_REGISTER_IMM(count);
1723 do {
1724 *cs++ = i915_mmio_reg_offset(flex->reg);
1725 *cs++ = flex->value;
1726 } while (flex++, --count);
1727 *cs++ = MI_NOOP;
1728
1729 intel_ring_advance(rq, cs);
1730
1731 return 0;
1732}
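
/*
 * For reference, the six dwords reserved above for count == 2 end up as:
 *
 *	MI_LOAD_REGISTER_IMM(2)
 *	<mmio offset of flex[0].reg> <flex[0].value>
 *	<mmio offset of flex[1].reg> <flex[1].value>
 *	MI_NOOP	// keeps the total dword count even
 */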
1733
1734static int gen8_modify_context(struct intel_context *ce,
1735 const struct flex *flex, unsigned int count)
1736{
1737 struct i915_request *rq;
1738 int err;
1739
1740 lockdep_assert_held(&ce->pin_mutex);
1741
1742 rq = i915_request_create(ce->engine->kernel_context);
1743 if (IS_ERR(rq))
1744 return PTR_ERR(rq);
1745
1746 /* Serialise with the remote context */
1747 err = intel_context_prepare_remote_request(ce, rq);
1748 if (err == 0)
1749 err = gen8_store_flex(rq, ce, flex, count);
1750
1751 i915_request_add(rq);
1752 return err;
1753}
1754
1755static int gen8_modify_self(struct intel_context *ce,
1756 const struct flex *flex, unsigned int count)
1757{
1758 struct i915_request *rq;
1759 int err;
1760
1761 rq = i915_request_create(ce);
1762 if (IS_ERR(rq))
1763 return PTR_ERR(rq);
1764
1765 err = gen8_load_flex(rq, ce, flex, count);
1766
1767 i915_request_add(rq);
1768 return err;
1769}
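
/*
 * Note the asymmetry between the two helpers above: gen8_modify_context()
 * runs on the engine's kernel_context and uses MI_STORE_DWORD_IMM to poke
 * new values into the target context's saved image (serialised against
 * that context via intel_context_prepare_remote_request()), while
 * gen8_modify_self() emits MI_LOAD_REGISTER_IMM on the target context
 * itself, so the registers are updated when that request executes.
 */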
1770
Chris Wilson5cca5032019-07-26 14:14:58 +01001771static int gen8_configure_context(struct i915_gem_context *ctx,
1772 struct flex *flex, unsigned int count)
1773{
1774 struct i915_gem_engines_iter it;
1775 struct intel_context *ce;
1776 int err = 0;
1777
1778 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
1779 GEM_BUG_ON(ce == ce->engine->kernel_context);
1780
1781 if (ce->engine->class != RENDER_CLASS)
1782 continue;
1783
1784 err = intel_context_lock_pinned(ce);
1785 if (err)
1786 break;
1787
1788 flex->value = intel_sseu_make_rpcs(ctx->i915, &ce->sseu);
1789
1790 /* Otherwise OA settings will be set upon first use */
1791 if (intel_context_is_pinned(ce))
1792 err = gen8_modify_context(ce, flex, count);
1793
1794 intel_context_unlock_pinned(ce);
1795 if (err)
1796 break;
1797 }
1798 i915_gem_context_unlock_engines(ctx);
1799
1800 return err;
1801}
1802
Robert Bragg19f81df2017-06-13 12:23:03 +01001803/*
Robert Bragg19f81df2017-06-13 12:23:03 +01001804 * Manages updating the per-context aspects of the OA stream
1805 * configuration across all contexts.
1806 *
1807 * The awkward consideration here is that OACTXCONTROL controls the
1808 * exponent for periodic sampling which is primarily used for system
1809 * wide profiling where we'd like a consistent sampling period even in
1810 * the face of context switches.
1811 *
1812 * Our approach of updating the register state context (as opposed to
1813 * say using a workaround batch buffer) ensures that the hardware
1814 * won't automatically reload an out-of-date timer exponent even
1815 * transiently before a WA BB could be parsed.
1816 *
1817 * This function needs to:
1818 * - Ensure the currently running context's per-context OA state is
1819 * updated
1820 * - Ensure that all existing contexts will have the correct per-context
1821 * OA state if they are scheduled for use.
1822 * - Ensure any new contexts will be initialized with the correct
1823 * per-context OA state.
1824 *
1825 * Note: it's only the RCS/Render context that has any OA state.
1826 */
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001827static int gen8_configure_all_contexts(struct i915_perf_stream *stream,
Lionel Landwerlin41d3fdc2018-03-01 11:06:13 +00001828 const struct i915_oa_config *oa_config)
Robert Bragg19f81df2017-06-13 12:23:03 +01001829{
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001830 struct drm_i915_private *i915 = stream->dev_priv;
Chris Wilsona9877da2019-07-16 22:34:43 +01001831 /* The MMIO offsets for Flex EU registers aren't contiguous */
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001832 const u32 ctx_flexeu0 = i915->perf.ctx_flexeu0_offset;
Chris Wilson7dc56af2019-09-24 15:59:50 +01001833#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1)
Chris Wilsona9877da2019-07-16 22:34:43 +01001834 struct flex regs[] = {
1835 {
1836 GEN8_R_PWR_CLK_STATE,
1837 CTX_R_PWR_CLK_STATE,
1838 },
1839 {
1840 GEN8_OACTXCONTROL,
Chris Wilson7dc56af2019-09-24 15:59:50 +01001841 i915->perf.ctx_oactxctrl_offset + 1,
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001842 ((stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
1843 (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
Chris Wilsona9877da2019-07-16 22:34:43 +01001844 GEN8_OA_COUNTER_RESUME)
1845 },
1846 { EU_PERF_CNTL0, ctx_flexeuN(0) },
1847 { EU_PERF_CNTL1, ctx_flexeuN(1) },
1848 { EU_PERF_CNTL2, ctx_flexeuN(2) },
1849 { EU_PERF_CNTL3, ctx_flexeuN(3) },
1850 { EU_PERF_CNTL4, ctx_flexeuN(4) },
1851 { EU_PERF_CNTL5, ctx_flexeuN(5) },
1852 { EU_PERF_CNTL6, ctx_flexeuN(6) },
1853 };
1854#undef ctx_flexeuN
1855 struct intel_engine_cs *engine;
Robert Bragg19f81df2017-06-13 12:23:03 +01001856 struct i915_gem_context *ctx;
Chris Wilsona9877da2019-07-16 22:34:43 +01001857 int i;
Robert Bragg19f81df2017-06-13 12:23:03 +01001858
Chris Wilsona9877da2019-07-16 22:34:43 +01001859 for (i = 2; i < ARRAY_SIZE(regs); i++)
1860 regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
1861
1862 lockdep_assert_held(&i915->drm.struct_mutex);
Robert Bragg19f81df2017-06-13 12:23:03 +01001863
Robert Bragg19f81df2017-06-13 12:23:03 +01001864 /*
1865 * The OA register config is setup through the context image. This image
1866 * might be written to by the GPU on context switch (in particular on
1867	 * lite-restore). This means we can't safely update a context's image
1868 * if this context is scheduled/submitted to run on the GPU.
1869 *
1870 * We could emit the OA register config through the batch buffer but
1871	 * this might leave a small interval of time where the OA unit is
1872 * configured at an invalid sampling period.
1873 *
Chris Wilsona9877da2019-07-16 22:34:43 +01001874 * Note that since we emit all requests from a single ring, there
1875 * is still an implicit global barrier here that may cause a high
1876 * priority context to wait for an otherwise independent low priority
1877 * context. Contexts idle at the time of reconfiguration are not
1878 * trapped behind the barrier.
Robert Bragg19f81df2017-06-13 12:23:03 +01001879 */
Chris Wilsona9877da2019-07-16 22:34:43 +01001880 list_for_each_entry(ctx, &i915->contexts.list, link) {
Chris Wilson5cca5032019-07-26 14:14:58 +01001881 int err;
Robert Bragg19f81df2017-06-13 12:23:03 +01001882
Chris Wilsona9877da2019-07-16 22:34:43 +01001883 if (ctx == i915->kernel_context)
1884 continue;
1885
Chris Wilson5cca5032019-07-26 14:14:58 +01001886 err = gen8_configure_context(ctx, regs, ARRAY_SIZE(regs));
Chris Wilsona9877da2019-07-16 22:34:43 +01001887 if (err)
1888 return err;
Robert Bragg19f81df2017-06-13 12:23:03 +01001889 }
1890
Tvrtko Ursulin722f3de2018-09-12 16:29:30 +01001891 /*
Chris Wilsona9877da2019-07-16 22:34:43 +01001892 * After updating all other contexts, we need to modify ourselves.
1893 * If we don't modify the kernel_context, we do not get events while
1894 * idle.
Tvrtko Ursulin722f3de2018-09-12 16:29:30 +01001895 */
Chris Wilson750e76b2019-08-06 13:43:00 +01001896 for_each_uabi_engine(engine, i915) {
Chris Wilsona9877da2019-07-16 22:34:43 +01001897 struct intel_context *ce = engine->kernel_context;
Chris Wilson5cca5032019-07-26 14:14:58 +01001898 int err;
Tvrtko Ursulin722f3de2018-09-12 16:29:30 +01001899
Chris Wilsona9877da2019-07-16 22:34:43 +01001900 if (engine->class != RENDER_CLASS)
1901 continue;
1902
1903 regs[0].value = intel_sseu_make_rpcs(i915, &ce->sseu);
1904
1905 err = gen8_modify_self(ce, regs, ARRAY_SIZE(regs));
1906 if (err)
1907 return err;
1908 }
Tvrtko Ursulin722f3de2018-09-12 16:29:30 +01001909
1910 return 0;
Robert Bragg19f81df2017-06-13 12:23:03 +01001911}
1912
Lionel Landwerlin5728de22018-10-23 11:07:06 +01001913static int gen8_enable_metric_set(struct i915_perf_stream *stream)
Robert Bragg19f81df2017-06-13 12:23:03 +01001914{
Lionel Landwerlin5728de22018-10-23 11:07:06 +01001915 struct drm_i915_private *dev_priv = stream->dev_priv;
1916 const struct i915_oa_config *oa_config = stream->oa_config;
Lionel Landwerlin701f8232017-08-03 17:58:08 +01001917 int ret;
Robert Bragg19f81df2017-06-13 12:23:03 +01001918
1919 /*
1920 * We disable slice/unslice clock ratio change reports on SKL since
1921 * they are too noisy. The HW generates a lot of redundant reports
1922	 * where the ratio hasn't really changed, causing a lot of redundant
1923	 * work for userspace processes and increasing the chances we'll hit buffer
1924 * overruns.
1925 *
1926 * Although we don't currently use the 'disable overrun' OABUFFER
1927 * feature it's worth noting that clock ratio reports have to be
1928	 * disabled before considering use of that feature, since the HW doesn't
1929 * correctly block these reports.
1930 *
1931 * Currently none of the high-level metrics we have depend on knowing
1932 * this ratio to normalize.
1933 *
1934 * Note: This register is not power context saved and restored, but
1935 * that's OK considering that we disable RC6 while the OA unit is
1936 * enabled.
1937 *
1938 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to
1939 * be read back from automatically triggered reports, as part of the
1940 * RPT_ID field.
1941 */
Lucas De Marchi00690002018-12-12 10:10:42 -08001942 if (IS_GEN_RANGE(dev_priv, 9, 11)) {
Robert Bragg19f81df2017-06-13 12:23:03 +01001943 I915_WRITE(GEN8_OA_DEBUG,
1944 _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
1945 GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
1946 }
1947
1948 /*
1949	 * Update all contexts prior to writing the mux configurations as we need
1950 * to make sure all slices/subslices are ON before writing to NOA
1951 * registers.
1952 */
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001953 ret = gen8_configure_all_contexts(stream, oa_config);
Robert Bragg19f81df2017-06-13 12:23:03 +01001954 if (ret)
1955 return ret;
1956
Lionel Landwerlin701f8232017-08-03 17:58:08 +01001957 config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
Lionel Landwerlin14bfcd32019-07-10 11:55:24 +01001958 delay_after_mux();
Lionel Landwerlin701f8232017-08-03 17:58:08 +01001959
Lionel Landwerlin701f8232017-08-03 17:58:08 +01001960 config_oa_regs(dev_priv, oa_config->b_counter_regs,
1961 oa_config->b_counter_regs_len);
Robert Bragg19f81df2017-06-13 12:23:03 +01001962
1963 return 0;
1964}
1965
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001966static void gen8_disable_metric_set(struct i915_perf_stream *stream)
Robert Bragg19f81df2017-06-13 12:23:03 +01001967{
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001968 struct drm_i915_private *dev_priv = stream->dev_priv;
1969
Robert Bragg19f81df2017-06-13 12:23:03 +01001970 /* Reset all contexts' slices/subslices configurations. */
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001971 gen8_configure_all_contexts(stream, NULL);
Lionel Landwerlin28964cf2017-08-03 17:58:10 +01001972
1973 I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
1974 ~GT_NOA_ENABLE));
Robert Bragg19f81df2017-06-13 12:23:03 +01001975}
1976
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001977static void gen10_disable_metric_set(struct i915_perf_stream *stream)
Lionel Landwerlin95690a02017-11-10 19:08:43 +00001978{
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001979 struct drm_i915_private *dev_priv = stream->dev_priv;
1980
Lionel Landwerlin95690a02017-11-10 19:08:43 +00001981 /* Reset all contexts' slices/subslices configurations. */
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001982 gen8_configure_all_contexts(stream, NULL);
Lionel Landwerlin95690a02017-11-10 19:08:43 +00001983
1984 /* Make sure we disable noa to save power. */
1985 I915_WRITE(RPM_CONFIG1,
1986 I915_READ(RPM_CONFIG1) & ~GEN10_GT_NOA_ENABLE);
1987}
1988
Lionel Landwerlin5728de22018-10-23 11:07:06 +01001989static void gen7_oa_enable(struct i915_perf_stream *stream)
Robert Braggd7965152016-11-07 19:49:52 +00001990{
Lionel Landwerlin5728de22018-10-23 11:07:06 +01001991 struct drm_i915_private *dev_priv = stream->dev_priv;
1992 struct i915_gem_context *ctx = stream->ctx;
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07001993 u32 ctx_id = stream->specific_ctx_id;
1994 bool periodic = stream->periodic;
1995 u32 period_exponent = stream->period_exponent;
1996 u32 report_format = stream->oa_buffer.format;
Lionel Landwerlin11051302018-03-26 10:08:23 +01001997
Robert Bragg1bef3402017-06-13 12:23:06 +01001998 /*
1999 * Reset buf pointers so we don't forward reports from before now.
2000 *
2001 * Think carefully if considering trying to avoid this, since it
2002 * also ensures status flags and the buffer itself are cleared
2003 * in error paths, and we have checks for invalid reports based
2004 * on the assumption that certain fields are written to zeroed
2005	 * memory, which this helps maintain.
2006 */
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07002007 gen7_init_oa_buffer(stream);
Robert Braggd7965152016-11-07 19:49:52 +00002008
Lionel Landwerlin11051302018-03-26 10:08:23 +01002009 I915_WRITE(GEN7_OACONTROL,
2010 (ctx_id & GEN7_OACONTROL_CTX_MASK) |
2011 (period_exponent <<
2012 GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
2013 (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
2014 (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
2015 (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
2016 GEN7_OACONTROL_ENABLE);
Robert Braggd7965152016-11-07 19:49:52 +00002017}
2018
Lionel Landwerlin5728de22018-10-23 11:07:06 +01002019static void gen8_oa_enable(struct i915_perf_stream *stream)
Robert Bragg19f81df2017-06-13 12:23:03 +01002020{
Lionel Landwerlin5728de22018-10-23 11:07:06 +01002021 struct drm_i915_private *dev_priv = stream->dev_priv;
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07002022 u32 report_format = stream->oa_buffer.format;
Robert Bragg19f81df2017-06-13 12:23:03 +01002023
2024 /*
2025 * Reset buf pointers so we don't forward reports from before now.
2026 *
2027 * Think carefully if considering trying to avoid this, since it
2028 * also ensures status flags and the buffer itself are cleared
2029 * in error paths, and we have checks for invalid reports based
2030 * on the assumption that certain fields are written to zeroed
2031	 * memory, which this helps maintain.
2032 */
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07002033 gen8_init_oa_buffer(stream);
Robert Bragg19f81df2017-06-13 12:23:03 +01002034
2035 /*
2036 * Note: we don't rely on the hardware to perform single context
2037 * filtering and instead filter on the cpu based on the context-id
2038 * field of reports
2039 */
2040 I915_WRITE(GEN8_OACONTROL, (report_format <<
2041 GEN8_OA_REPORT_FORMAT_SHIFT) |
2042 GEN8_OA_COUNTER_ENABLE);
2043}
2044
Robert Bragg16d98b32016-12-07 21:40:33 +00002045/**
2046 * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream
2047 * @stream: An i915 perf stream opened for OA metrics
2048 *
2049 * [Re]enables hardware periodic sampling according to the period configured
2050 * when opening the stream. This also starts a hrtimer that will periodically
2051 * check for data in the circular OA buffer for notifying userspace (e.g.
2052 * during a read() or poll()).
2053 */
Robert Braggd7965152016-11-07 19:49:52 +00002054static void i915_oa_stream_enable(struct i915_perf_stream *stream)
2055{
2056 struct drm_i915_private *dev_priv = stream->dev_priv;
2057
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07002058 dev_priv->perf.ops.oa_enable(stream);
Robert Braggd7965152016-11-07 19:49:52 +00002059
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07002060 if (stream->periodic)
2061 hrtimer_start(&stream->poll_check_timer,
Robert Braggd7965152016-11-07 19:49:52 +00002062 ns_to_ktime(POLL_PERIOD),
2063 HRTIMER_MODE_REL_PINNED);
2064}
2065
Lionel Landwerlin5728de22018-10-23 11:07:06 +01002066static void gen7_oa_disable(struct i915_perf_stream *stream)
Robert Braggd7965152016-11-07 19:49:52 +00002067{
Daniele Ceraolo Spurio97a04e02019-03-25 14:49:39 -07002068 struct intel_uncore *uncore = &stream->dev_priv->uncore;
Lionel Landwerlin5728de22018-10-23 11:07:06 +01002069
Daniele Ceraolo Spurio97a04e02019-03-25 14:49:39 -07002070 intel_uncore_write(uncore, GEN7_OACONTROL, 0);
2071 if (intel_wait_for_register(uncore,
Chris Wilsone896d292018-05-11 14:52:07 +01002072 GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
2073 50))
2074 DRM_ERROR("wait for OA to be disabled timed out\n");
Robert Braggd7965152016-11-07 19:49:52 +00002075}
2076
Lionel Landwerlin5728de22018-10-23 11:07:06 +01002077static void gen8_oa_disable(struct i915_perf_stream *stream)
Robert Bragg19f81df2017-06-13 12:23:03 +01002078{
Daniele Ceraolo Spurio97a04e02019-03-25 14:49:39 -07002079 struct intel_uncore *uncore = &stream->dev_priv->uncore;
Lionel Landwerlin5728de22018-10-23 11:07:06 +01002080
Daniele Ceraolo Spurio97a04e02019-03-25 14:49:39 -07002081 intel_uncore_write(uncore, GEN8_OACONTROL, 0);
2082 if (intel_wait_for_register(uncore,
Chris Wilsone896d292018-05-11 14:52:07 +01002083 GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
2084 50))
2085 DRM_ERROR("wait for OA to be disabled timed out\n");
Robert Bragg19f81df2017-06-13 12:23:03 +01002086}
2087
Robert Bragg16d98b32016-12-07 21:40:33 +00002088/**
2089 * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
2090 * @stream: An i915 perf stream opened for OA metrics
2091 *
2092 * Stops the OA unit from periodically writing counter reports into the
2093 * circular OA buffer. This also stops the hrtimer that periodically checks for
2094 * data in the circular OA buffer, for notifying userspace.
2095 */
Robert Braggd7965152016-11-07 19:49:52 +00002096static void i915_oa_stream_disable(struct i915_perf_stream *stream)
2097{
2098 struct drm_i915_private *dev_priv = stream->dev_priv;
2099
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07002100 dev_priv->perf.ops.oa_disable(stream);
Robert Braggd7965152016-11-07 19:49:52 +00002101
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07002102 if (stream->periodic)
2103 hrtimer_cancel(&stream->poll_check_timer);
Robert Braggd7965152016-11-07 19:49:52 +00002104}
2105
Robert Braggd7965152016-11-07 19:49:52 +00002106static const struct i915_perf_stream_ops i915_oa_stream_ops = {
2107 .destroy = i915_oa_stream_destroy,
2108 .enable = i915_oa_stream_enable,
2109 .disable = i915_oa_stream_disable,
2110 .wait_unlocked = i915_oa_wait_unlocked,
2111 .poll_wait = i915_oa_poll_wait,
2112 .read = i915_oa_read,
2113};
2114
Robert Bragg16d98b32016-12-07 21:40:33 +00002115/**
2116 * i915_oa_stream_init - validate combined props for OA stream and init
2117 * @stream: An i915 perf stream
2118 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
2119 * @props: The property state that configures stream (individually validated)
2120 *
2121 * While read_properties_unlocked() validates properties in isolation, it
2122 * doesn't ensure that the combination necessarily makes sense.
2123 *
2124 * At this point it has been determined that userspace wants a stream of
2125 * OA metrics, but we still need to further validate that the combined
2126 * properties are OK.
2127 *
2128 * If the configuration makes sense then we can allocate memory for
2129 * a circular OA buffer and apply the requested metric set configuration.
2130 *
2131 * Returns: zero on success or a negative error code.
2132 */
Robert Braggd7965152016-11-07 19:49:52 +00002133static int i915_oa_stream_init(struct i915_perf_stream *stream,
2134 struct drm_i915_perf_open_param *param,
2135 struct perf_open_properties *props)
2136{
2137 struct drm_i915_private *dev_priv = stream->dev_priv;
2138 int format_size;
2139 int ret;
2140
Robert Bragg442b8c02016-11-07 19:49:53 +00002141 /* If the sysfs metrics/ directory wasn't registered for some
2142 * reason then don't let userspace try their luck with config
2143 * IDs
2144 */
2145 if (!dev_priv->perf.metrics_kobj) {
Robert Bragg77085502016-12-01 17:21:52 +00002146 DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
Robert Bragg442b8c02016-11-07 19:49:53 +00002147 return -EINVAL;
2148 }
2149
Robert Braggd7965152016-11-07 19:49:52 +00002150 if (!(props->sample_flags & SAMPLE_OA_REPORT)) {
Robert Bragg77085502016-12-01 17:21:52 +00002151 DRM_DEBUG("Only OA report sampling supported\n");
Robert Braggd7965152016-11-07 19:49:52 +00002152 return -EINVAL;
2153 }
2154
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07002155 if (!dev_priv->perf.ops.enable_metric_set) {
Robert Bragg77085502016-12-01 17:21:52 +00002156 DRM_DEBUG("OA unit not supported\n");
Robert Braggd7965152016-11-07 19:49:52 +00002157 return -ENODEV;
2158 }
2159
2160 /* To avoid the complexity of having to accurately filter
2161	 * counter reports and marshal them to the appropriate client,
2162 * we currently only allow exclusive access
2163 */
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07002164 if (dev_priv->perf.exclusive_stream) {
Robert Bragg77085502016-12-01 17:21:52 +00002165 DRM_DEBUG("OA unit already in use\n");
Robert Braggd7965152016-11-07 19:49:52 +00002166 return -EBUSY;
2167 }
2168
Robert Braggd7965152016-11-07 19:49:52 +00002169 if (!props->oa_format) {
Robert Bragg77085502016-12-01 17:21:52 +00002170 DRM_DEBUG("OA report format not specified\n");
Robert Braggd7965152016-11-07 19:49:52 +00002171 return -EINVAL;
2172 }
2173
2174 stream->sample_size = sizeof(struct drm_i915_perf_record_header);
2175
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07002176 format_size = dev_priv->perf.oa_formats[props->oa_format].size;
Robert Braggd7965152016-11-07 19:49:52 +00002177
2178 stream->sample_flags |= SAMPLE_OA_REPORT;
2179 stream->sample_size += format_size;
2180
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07002181 stream->oa_buffer.format_size = format_size;
2182 if (WARN_ON(stream->oa_buffer.format_size == 0))
Robert Braggd7965152016-11-07 19:49:52 +00002183 return -EINVAL;
2184
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07002185 stream->oa_buffer.format =
2186 dev_priv->perf.oa_formats[props->oa_format].format;
Robert Braggd7965152016-11-07 19:49:52 +00002187
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07002188 stream->periodic = props->oa_periodic;
2189 if (stream->periodic)
2190 stream->period_exponent = props->oa_period_exponent;
Robert Braggd7965152016-11-07 19:49:52 +00002191
Robert Braggd7965152016-11-07 19:49:52 +00002192 if (stream->ctx) {
2193 ret = oa_get_render_ctx_id(stream);
Lionel Landwerlin9bd9be62018-03-26 10:08:28 +01002194 if (ret) {
2195 DRM_DEBUG("Invalid context id to filter with\n");
Robert Braggd7965152016-11-07 19:49:52 +00002196 return ret;
Lionel Landwerlin9bd9be62018-03-26 10:08:28 +01002197 }
Robert Braggd7965152016-11-07 19:49:52 +00002198 }
2199
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01002200 ret = get_oa_config(dev_priv, props->metrics_set, &stream->oa_config);
Lionel Landwerlin9bd9be62018-03-26 10:08:28 +01002201 if (ret) {
2202 DRM_DEBUG("Invalid OA config id=%i\n", props->metrics_set);
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01002203 goto err_config;
Lionel Landwerlin9bd9be62018-03-26 10:08:28 +01002204 }
Lionel Landwerlin701f8232017-08-03 17:58:08 +01002205
Robert Braggd7965152016-11-07 19:49:52 +00002206 /* PRM - observability performance counters:
2207 *
2208 * OACONTROL, performance counter enable, note:
2209 *
2210 * "When this bit is set, in order to have coherent counts,
2211 * RC6 power state and trunk clock gating must be disabled.
2212 * This can be achieved by programming MMIO registers as
2213 * 0xA094=0 and 0xA090[31]=1"
2214 *
2215 * In our case we are expecting that taking pm + FORCEWAKE
2216 * references will effectively disable RC6.
2217 */
Daniele Ceraolo Spuriod858d562019-06-13 16:21:54 -07002218 stream->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07002219 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
Robert Braggd7965152016-11-07 19:49:52 +00002220
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07002221 ret = alloc_oa_buffer(stream);
sagar.a.kamble@intel.com987f8c42017-06-27 23:09:41 +05302222 if (ret)
2223 goto err_oa_buf_alloc;
2224
Lionel Landwerlin41d3fdc2018-03-01 11:06:13 +00002225 ret = i915_mutex_lock_interruptible(&dev_priv->drm);
2226 if (ret)
2227 goto err_lock;
2228
Lionel Landwerlinec431ea2019-02-05 09:50:29 +00002229 stream->ops = &i915_oa_stream_ops;
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07002230 dev_priv->perf.exclusive_stream = stream;
Lionel Landwerlinec431ea2019-02-05 09:50:29 +00002231
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07002232 ret = dev_priv->perf.ops.enable_metric_set(stream);
Lionel Landwerlin9bd9be62018-03-26 10:08:28 +01002233 if (ret) {
2234 DRM_DEBUG("Unable to enable metric set\n");
Robert Braggd7965152016-11-07 19:49:52 +00002235 goto err_enable;
Lionel Landwerlin9bd9be62018-03-26 10:08:28 +01002236 }
Robert Braggd7965152016-11-07 19:49:52 +00002237
Lionel Landwerlin701f8232017-08-03 17:58:08 +01002238 mutex_unlock(&dev_priv->drm.struct_mutex);
2239
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07002240 hrtimer_init(&stream->poll_check_timer,
2241 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2242 stream->poll_check_timer.function = oa_poll_check_timer_cb;
2243 init_waitqueue_head(&stream->poll_wq);
2244 spin_lock_init(&stream->oa_buffer.ptr_lock);
2245
Robert Braggd7965152016-11-07 19:49:52 +00002246 return 0;
2247
2248err_enable:
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07002249 dev_priv->perf.exclusive_stream = NULL;
2250 dev_priv->perf.ops.disable_metric_set(stream);
Lionel Landwerlin41d3fdc2018-03-01 11:06:13 +00002251 mutex_unlock(&dev_priv->drm.struct_mutex);
2252
2253err_lock:
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07002254 free_oa_buffer(stream);
Robert Braggd7965152016-11-07 19:49:52 +00002255
2256err_oa_buf_alloc:
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01002257 put_oa_config(dev_priv, stream->oa_config);
2258
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07002259 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
Daniele Ceraolo Spuriod858d562019-06-13 16:21:54 -07002260 intel_runtime_pm_put(&dev_priv->runtime_pm, stream->wakeref);
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01002261
2262err_config:
Robert Braggd7965152016-11-07 19:49:52 +00002263 if (stream->ctx)
2264 oa_put_render_ctx_id(stream);
2265
2266 return ret;
2267}
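
/*
 * Illustrative sketch (userspace, not driver code) of opening a stream
 * that exercises this init path; assumes the usual i915_drm.h uapi
 * definitions, and the metrics set ID and exponent are example values:
 *
 *	uint64_t properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */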
2268
Chris Wilson7dc56af2019-09-24 15:59:50 +01002269void i915_oa_init_reg_state(const struct intel_context *ce,
2270 const struct intel_engine_cs *engine)
Robert Bragg19f81df2017-06-13 12:23:03 +01002271{
Chris Wilson28b6cb02017-08-10 18:57:43 +01002272 struct i915_perf_stream *stream;
Robert Bragg19f81df2017-06-13 12:23:03 +01002273
Chris Wilsondffa8fe2019-08-30 19:19:29 +01002274 /* perf.exclusive_stream serialised by gen8_configure_all_contexts() */
2275 lockdep_assert_held(&ce->pin_mutex);
2276
Chris Wilson8a68d462019-03-05 18:03:30 +00002277 if (engine->class != RENDER_CLASS)
Robert Bragg19f81df2017-06-13 12:23:03 +01002278 return;
2279
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07002280 stream = engine->i915->perf.exclusive_stream;
Lionel Landwerlin701f8232017-08-03 17:58:08 +01002281 if (stream)
Chris Wilson7dc56af2019-09-24 15:59:50 +01002282 gen8_update_reg_state_unlocked(ce, stream);
Robert Bragg19f81df2017-06-13 12:23:03 +01002283}
2284
Robert Bragg16d98b32016-12-07 21:40:33 +00002285/**
2286 * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation
2287 * @stream: An i915 perf stream
2288 * @file: An i915 perf stream file
2289 * @buf: destination buffer given by userspace
2290 * @count: the number of bytes userspace wants to read
2291 * @ppos: (inout) file seek position (unused)
2292 *
2293 * Besides wrapping &i915_perf_stream_ops->read this provides a common place to
2294 * ensure that if we've successfully copied any data then reporting that takes
2295 * precedence over any internal error status, so the data isn't lost.
2296 *
2297 * For example ret will be -ENOSPC whenever there is more buffered data than
2298 * can be copied to userspace, but that's only interesting if we weren't able
2299 * to copy some data because it implies the userspace buffer is too small to
2300 * receive a single record (and we never split records).
2301 *
2302 * Another case with ret == -EFAULT is more of a grey area since it would seem
2303 * like bad form for userspace to ask us to overrun its buffer, but the user
2304 * knows best:
2305 *
2306 * http://yarchive.net/comp/linux/partial_reads_writes.html
2307 *
2308 * Returns: The number of bytes copied or a negative error code on failure.
2309 */
Robert Braggeec688e2016-11-07 19:49:47 +00002310static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream,
2311 struct file *file,
2312 char __user *buf,
2313 size_t count,
2314 loff_t *ppos)
2315{
2316 /* Note we keep the offset (aka bytes read) separate from any
2317 * error status so that the final check for whether we return
2318 * the bytes read with a higher precedence than any error (see
2319 * comment below) doesn't need to be handled/duplicated in
2320 * stream->ops->read() implementations.
2321 */
2322 size_t offset = 0;
2323 int ret = stream->ops->read(stream, buf, count, &offset);
2324
Robert Braggeec688e2016-11-07 19:49:47 +00002325 return offset ?: (ret ?: -EAGAIN);
2326}
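
/*
 * Worked example of the return expression above:
 *
 *	offset == 128, ret == -ENOSPC  ->  128 (copied data takes precedence)
 *	offset == 0,   ret == -EFAULT  ->  -EFAULT
 *	offset == 0,   ret == 0        ->  -EAGAIN (nothing available yet)
 */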
2327
Robert Bragg16d98b32016-12-07 21:40:33 +00002328/**
2329 * i915_perf_read - handles read() FOP for i915 perf stream FDs
2330 * @file: An i915 perf stream file
2331 * @buf: destination buffer given by userspace
2332 * @count: the number of bytes userspace wants to read
2333 * @ppos: (inout) file seek position (unused)
2334 *
2335 * The entry point for handling a read() on a stream file descriptor from
2336 * userspace. Most of the work is left to i915_perf_read_locked() and
2337 * &i915_perf_stream_ops->read, but to save stream implementations (of which
2338 * we might have multiple later) from duplicating it, we handle blocking reads here.
2339 *
2340 * We can also consistently treat trying to read from a disabled stream
2341 * as an IO error so implementations can assume the stream is enabled
2342 * while reading.
2343 *
2344 * Returns: The number of bytes copied or a negative error code on failure.
2345 */
Robert Braggeec688e2016-11-07 19:49:47 +00002346static ssize_t i915_perf_read(struct file *file,
2347 char __user *buf,
2348 size_t count,
2349 loff_t *ppos)
2350{
2351 struct i915_perf_stream *stream = file->private_data;
2352 struct drm_i915_private *dev_priv = stream->dev_priv;
2353 ssize_t ret;
2354
Robert Braggd7965152016-11-07 19:49:52 +00002355 /* To ensure it's handled consistently we simply treat all reads of a
2356 * disabled stream as an error. In particular it might otherwise lead
2357 * to a deadlock for blocking file descriptors...
2358 */
2359 if (!stream->enabled)
2360 return -EIO;
2361
Robert Braggeec688e2016-11-07 19:49:47 +00002362 if (!(file->f_flags & O_NONBLOCK)) {
Robert Braggd7965152016-11-07 19:49:52 +00002363 /* There's the small chance of false positives from
2364 * stream->ops->wait_unlocked.
2365 *
2366 * E.g. with single context filtering, since we only wait until the
2367 * OA buffer has >= 1 report, we don't immediately know whether any
2368 * reports really belong to the current context.
Robert Braggeec688e2016-11-07 19:49:47 +00002369 */
2370 do {
2371 ret = stream->ops->wait_unlocked(stream);
2372 if (ret)
2373 return ret;
2374
2375 mutex_lock(&dev_priv->perf.lock);
2376 ret = i915_perf_read_locked(stream, file,
2377 buf, count, ppos);
2378 mutex_unlock(&dev_priv->perf.lock);
2379 } while (ret == -EAGAIN);
2380 } else {
2381 mutex_lock(&dev_priv->perf.lock);
2382 ret = i915_perf_read_locked(stream, file, buf, count, ppos);
2383 mutex_unlock(&dev_priv->perf.lock);
2384 }
2385
Linus Torvaldsa9a08842018-02-11 14:34:03 -08002386 /* We allow the poll checking to sometimes report false positive EPOLLIN
Robert Bragg26ebd9c2017-05-11 16:43:25 +01002387 * events where we might actually report EAGAIN on read() if there's
2388 * not really any data available. In this situation though we don't
Linus Torvaldsa9a08842018-02-11 14:34:03 -08002389 * want to enter a busy loop between poll() reporting a EPOLLIN event
Robert Bragg26ebd9c2017-05-11 16:43:25 +01002390 * and read() returning -EAGAIN. Clearing the oa.pollin state here
2391 * effectively ensures we back off until the next hrtimer callback
Linus Torvaldsa9a08842018-02-11 14:34:03 -08002392 * before reporting another EPOLLIN event.
Robert Bragg26ebd9c2017-05-11 16:43:25 +01002393 */
2394 if (ret >= 0 || ret == -EAGAIN) {
Robert Braggd7965152016-11-07 19:49:52 +00002395 /* Maybe make ->pollin per-stream state if we support multiple
2396 * concurrent streams in the future.
2397 */
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07002398 stream->pollin = false;
Robert Braggd7965152016-11-07 19:49:52 +00002399 }
2400
Robert Braggeec688e2016-11-07 19:49:47 +00002401 return ret;
2402}
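/*
 * A minimal userspace sketch of driving this entry point, assuming a
 * `stream_fd` returned by DRM_IOCTL_I915_PERF_OPEN and a buffer large
 * enough for at least one record (names here are illustrative):
 *
 *	#include <stdint.h>
 *	#include <unistd.h>
 *	#include <drm/i915_drm.h>
 *
 *	static void read_records(int stream_fd)
 *	{
 *		uint8_t buf[256 * 1024];
 *		ssize_t len = read(stream_fd, buf, sizeof(buf));
 *		size_t off = 0;
 *
 *		if (len < 0)
 *			return; // read() failed, e.g. errno == EAGAIN with O_NONBLOCK
 *
 *		while (off + sizeof(struct drm_i915_perf_record_header) <= (size_t)len) {
 *			const struct drm_i915_perf_record_header *hdr =
 *				(const void *)(buf + off);
 *
 *			if (hdr->type == DRM_I915_PERF_RECORD_SAMPLE)
 *				; // raw OA report follows the header
 *			off += hdr->size; // size includes the header itself
 *		}
 *	}
 */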
2403
Robert Braggd7965152016-11-07 19:49:52 +00002404static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
2405{
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07002406 struct i915_perf_stream *stream =
2407 container_of(hrtimer, typeof(*stream), poll_check_timer);
Robert Braggd7965152016-11-07 19:49:52 +00002408
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07002409 if (oa_buffer_check_unlocked(stream)) {
2410 stream->pollin = true;
2411 wake_up(&stream->poll_wq);
Robert Braggd7965152016-11-07 19:49:52 +00002412 }
2413
2414 hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD));
2415
2416 return HRTIMER_RESTART;
2417}
2418
Robert Bragg16d98b32016-12-07 21:40:33 +00002419/**
2420 * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
2421 * @dev_priv: i915 device instance
2422 * @stream: An i915 perf stream
2423 * @file: An i915 perf stream file
2424 * @wait: poll() state table
2425 *
2426 * For handling userspace polling on an i915 perf stream, this calls through to
2427 * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
2428 * will be woken for new stream data.
2429 *
2430 * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
2431 * with any non-file-operation driver hooks.
2432 *
2433 * Returns: any poll events that are ready without sleeping
2434 */
Al Viroafc9a422017-07-03 06:39:46 -04002435static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv,
Robert Braggd7965152016-11-07 19:49:52 +00002436 struct i915_perf_stream *stream,
Robert Braggeec688e2016-11-07 19:49:47 +00002437 struct file *file,
2438 poll_table *wait)
2439{
Al Viroafc9a422017-07-03 06:39:46 -04002440 __poll_t events = 0;
Robert Braggeec688e2016-11-07 19:49:47 +00002441
2442 stream->ops->poll_wait(stream, file, wait);
2443
Robert Braggd7965152016-11-07 19:49:52 +00002444 /* Note: we don't explicitly check whether there's something to read
2445 * here since this path may be very hot depending on what else
2446 * userspace is polling, or on the timeout in use. We rely solely on
2447 * the hrtimer/oa_poll_check_timer_cb to notify us when there are
2448 * samples to read.
2449 */
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07002450 if (stream->pollin)
Linus Torvaldsa9a08842018-02-11 14:34:03 -08002451 events |= EPOLLIN;
Robert Braggeec688e2016-11-07 19:49:47 +00002452
Robert Braggd7965152016-11-07 19:49:52 +00002453 return events;
Robert Braggeec688e2016-11-07 19:49:47 +00002454}
2455
Robert Bragg16d98b32016-12-07 21:40:33 +00002456/**
2457 * i915_perf_poll - call poll_wait() with a suitable wait queue for stream
2458 * @file: An i915 perf stream file
2459 * @wait: poll() state table
2460 *
2461 * For handling userspace polling on an i915 perf stream, this ensures
2462 * poll_wait() gets called with a wait queue that will be woken for new stream
2463 * data.
2464 *
2465 * Note: Implementation deferred to i915_perf_poll_locked()
2466 *
2467 * Returns: any poll events that are ready without sleeping
2468 */
Al Viroafc9a422017-07-03 06:39:46 -04002469static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
Robert Braggeec688e2016-11-07 19:49:47 +00002470{
2471 struct i915_perf_stream *stream = file->private_data;
2472 struct drm_i915_private *dev_priv = stream->dev_priv;
Al Viroafc9a422017-07-03 06:39:46 -04002473 __poll_t ret;
Robert Braggeec688e2016-11-07 19:49:47 +00002474
2475 mutex_lock(&dev_priv->perf.lock);
Robert Braggd7965152016-11-07 19:49:52 +00002476 ret = i915_perf_poll_locked(dev_priv, stream, file, wait);
Robert Braggeec688e2016-11-07 19:49:47 +00002477 mutex_unlock(&dev_priv->perf.lock);
2478
2479 return ret;
2480}
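/*
 * Hedged sketch of the userspace side: wait for POLLIN and only then
 * read(), tolerating the occasional false positive described above:
 *
 *	#include <poll.h>
 *
 *	struct pollfd pfd = { .fd = stream_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		read_records(stream_fd); // may still fail with EAGAIN
 */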
2481
Robert Bragg16d98b32016-12-07 21:40:33 +00002482/**
2483 * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl
2484 * @stream: A disabled i915 perf stream
2485 *
2486 * [Re]enables the associated capture of data for this stream.
2487 *
2488 * If a stream was previously enabled then there's currently no intention
2489 * to provide userspace any guarantee about the preservation of previously
2490 * buffered data.
2491 */
Robert Braggeec688e2016-11-07 19:49:47 +00002492static void i915_perf_enable_locked(struct i915_perf_stream *stream)
2493{
2494 if (stream->enabled)
2495 return;
2496
2497 /* Allow stream->ops->enable() to refer to this */
2498 stream->enabled = true;
2499
2500 if (stream->ops->enable)
2501 stream->ops->enable(stream);
2502}
2503
Robert Bragg16d98b32016-12-07 21:40:33 +00002504/**
2505 * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl
2506 * @stream: An enabled i915 perf stream
2507 *
2508 * Disables the associated capture of data for this stream.
2509 *
2510 * The intention is that disabling and re-enabling a stream will ideally be
2511 * cheaper than destroying and re-opening a stream with the same configuration,
2512 * though there are no formal guarantees about what state or buffered data
2513 * must be retained between disabling and re-enabling a stream.
2514 *
2515 * Note: while a stream is disabled it's considered an error for userspace
2516 * to attempt to read from the stream (-EIO).
2517 */
Robert Braggeec688e2016-11-07 19:49:47 +00002518static void i915_perf_disable_locked(struct i915_perf_stream *stream)
2519{
2520 if (!stream->enabled)
2521 return;
2522
2523 /* Allow stream->ops->disable() to refer to this */
2524 stream->enabled = false;
2525
2526 if (stream->ops->disable)
2527 stream->ops->disable(stream);
2528}
2529
Robert Bragg16d98b32016-12-07 21:40:33 +00002530/**
2531 * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs
2532 * @stream: An i915 perf stream
2533 * @cmd: the ioctl request
2534 * @arg: the ioctl data
2535 *
2536 * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
2537 * with any non-file-operation driver hooks.
2538 *
2539 * Returns: zero on success or a negative error code. Returns -EINVAL for
2540 * an unknown ioctl request.
2541 */
Robert Braggeec688e2016-11-07 19:49:47 +00002542static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
2543 unsigned int cmd,
2544 unsigned long arg)
2545{
2546 switch (cmd) {
2547 case I915_PERF_IOCTL_ENABLE:
2548 i915_perf_enable_locked(stream);
2549 return 0;
2550 case I915_PERF_IOCTL_DISABLE:
2551 i915_perf_disable_locked(stream);
2552 return 0;
2553 }
2554
2555 return -EINVAL;
2556}
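/*
 * From userspace these map onto argument-less ioctl() calls on the stream
 * fd (an illustrative sketch):
 *
 *	ioctl(stream_fd, I915_PERF_IOCTL_DISABLE, 0); // pause capture
 *	ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0);  // resume capture
 */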
2557
Robert Bragg16d98b32016-12-07 21:40:33 +00002558/**
2559 * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
2560 * @file: An i915 perf stream file
2561 * @cmd: the ioctl request
2562 * @arg: the ioctl data
2563 *
2564 * Implementation deferred to i915_perf_ioctl_locked().
2565 *
2566 * Returns: zero on success or a negative error code. Returns -EINVAL for
2567 * an unknown ioctl request.
2568 */
Robert Braggeec688e2016-11-07 19:49:47 +00002569static long i915_perf_ioctl(struct file *file,
2570 unsigned int cmd,
2571 unsigned long arg)
2572{
2573 struct i915_perf_stream *stream = file->private_data;
2574 struct drm_i915_private *dev_priv = stream->dev_priv;
2575 long ret;
2576
2577 mutex_lock(&dev_priv->perf.lock);
2578 ret = i915_perf_ioctl_locked(stream, cmd, arg);
2579 mutex_unlock(&dev_priv->perf.lock);
2580
2581 return ret;
2582}
2583
Robert Bragg16d98b32016-12-07 21:40:33 +00002584/**
2585 * i915_perf_destroy_locked - destroy an i915 perf stream
2586 * @stream: An i915 perf stream
2587 *
2588 * Frees all resources associated with the given i915 perf @stream, disabling
2589 * any associated data capture in the process.
2590 *
2591 * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
2592 * with any non-file-operation driver hooks.
2593 */
Robert Braggeec688e2016-11-07 19:49:47 +00002594static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
2595{
Robert Braggeec688e2016-11-07 19:49:47 +00002596 if (stream->enabled)
2597 i915_perf_disable_locked(stream);
2598
2599 if (stream->ops->destroy)
2600 stream->ops->destroy(stream);
2601
2602 list_del(&stream->link);
2603
Chris Wilson69df05e2016-12-18 15:37:21 +00002604 if (stream->ctx)
Chris Wilson5f09a9c2017-06-20 12:05:46 +01002605 i915_gem_context_put(stream->ctx);
Robert Braggeec688e2016-11-07 19:49:47 +00002606
2607 kfree(stream);
2608}
2609
Robert Bragg16d98b32016-12-07 21:40:33 +00002610/**
2611 * i915_perf_release - handles userspace close() of a stream file
2612 * @inode: anonymous inode associated with file
2613 * @file: An i915 perf stream file
2614 *
2615 * Cleans up any resources associated with an open i915 perf stream file.
2616 *
2617 * NB: close() can't really fail from the userspace point of view.
2618 *
2619 * Returns: zero on success or a negative error code.
2620 */
Robert Braggeec688e2016-11-07 19:49:47 +00002621static int i915_perf_release(struct inode *inode, struct file *file)
2622{
2623 struct i915_perf_stream *stream = file->private_data;
2624 struct drm_i915_private *dev_priv = stream->dev_priv;
2625
2626 mutex_lock(&dev_priv->perf.lock);
2627 i915_perf_destroy_locked(stream);
2628 mutex_unlock(&dev_priv->perf.lock);
2629
Lionel Landwerlina5af1df2019-07-09 15:33:39 +03002630 /* Release the reference the perf stream kept on the driver. */
2631 drm_dev_put(&dev_priv->drm);
2632
Robert Braggeec688e2016-11-07 19:49:47 +00002633 return 0;
2634}
2635
2636
2637static const struct file_operations fops = {
2638 .owner = THIS_MODULE,
2639 .llseek = no_llseek,
2640 .release = i915_perf_release,
2641 .poll = i915_perf_poll,
2642 .read = i915_perf_read,
2643 .unlocked_ioctl = i915_perf_ioctl,
Lionel Landwerlin191f8962017-10-24 16:27:28 +01002644 /* Our ioctls have no arguments, so it's safe to use the same function
2645 * to handle 32-bit compatibility.
2646 */
2647 .compat_ioctl = i915_perf_ioctl,
Robert Braggeec688e2016-11-07 19:49:47 +00002648};
2649
2650
Robert Bragg16d98b32016-12-07 21:40:33 +00002651/**
2652 * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
2653 * @dev_priv: i915 device instance
2654 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
2655 * @props: individually validated u64 property value pairs
2656 * @file: drm file
2657 *
2658 * See i915_perf_ioctl_open() for interface details.
2659 *
2660 * Implements further stream config validation and stream initialization on
2661 * behalf of i915_perf_open_ioctl() with the &drm_i915_private->perf.lock mutex
2662 * taken to serialize with any non-file-operation driver hooks.
2663 *
2664 * Note: at this point the @props have only been validated in isolation and
2665 * it's still necessary to validate that the combination of properties makes
2666 * sense.
2667 *
2668 * In the case where userspace is interested in OA unit metrics then further
2669 * config validation and stream initialization details will be handled by
2670 * i915_oa_stream_init(). The code here should only validate config state that
2671 * will be relevant to all stream types / backends.
2672 *
2673 * Returns: zero on success or a negative error code.
2674 */
Robert Braggeec688e2016-11-07 19:49:47 +00002675static int
2676i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
2677 struct drm_i915_perf_open_param *param,
2678 struct perf_open_properties *props,
2679 struct drm_file *file)
2680{
2681 struct i915_gem_context *specific_ctx = NULL;
2682 struct i915_perf_stream *stream = NULL;
2683 unsigned long f_flags = 0;
Robert Bragg19f81df2017-06-13 12:23:03 +01002684 bool privileged_op = true;
Robert Braggeec688e2016-11-07 19:49:47 +00002685 int stream_fd;
2686 int ret;
2687
2688 if (props->single_context) {
2689 u32 ctx_handle = props->ctx_handle;
2690 struct drm_i915_file_private *file_priv = file->driver_priv;
2691
Imre Deak635f56c2017-07-14 18:12:41 +03002692 specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
2693 if (!specific_ctx) {
2694 DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n",
2695 ctx_handle);
2696 ret = -ENOENT;
Robert Braggeec688e2016-11-07 19:49:47 +00002697 goto err;
2698 }
2699 }
2700
Robert Bragg19f81df2017-06-13 12:23:03 +01002701 /*
2702 * On Haswell the OA unit supports clock gating off for a specific
2703 * context and in this mode there's no visibility of metrics for the
2704 * rest of the system, which we consider acceptable for a
2705 * non-privileged client.
2706 *
2707 * For Gen8+ the OA unit no longer supports clock gating off for a
2708 * specific context and the kernel can't securely stop the counters
2709 * from updating as system-wide / global values. Even though we can
2710 * filter reports based on the included context ID we can't block
2711 * clients from seeing the raw / global counter values via
2712 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
2713 * enable the OA unit by default.
2714 */
2715 if (IS_HASWELL(dev_priv) && specific_ctx)
2716 privileged_op = false;
2717
Robert Braggccdf6342016-11-07 19:49:54 +00002718 /* Similar to perf's kernel.perf_paranoid_cpu sysctl option
2719 * we check a dev.i915.perf_stream_paranoid sysctl option
2720 * to determine if it's ok to access system wide OA counters
2721 * without CAP_SYS_ADMIN privileges.
2722 */
Robert Bragg19f81df2017-06-13 12:23:03 +01002723 if (privileged_op &&
Robert Braggccdf6342016-11-07 19:49:54 +00002724 i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
Robert Bragg77085502016-12-01 17:21:52 +00002725 DRM_DEBUG("Insufficient privileges to open system-wide i915 perf stream\n");
Robert Braggeec688e2016-11-07 19:49:47 +00002726 ret = -EACCES;
2727 goto err_ctx;
2728 }
2729
2730 stream = kzalloc(sizeof(*stream), GFP_KERNEL);
2731 if (!stream) {
2732 ret = -ENOMEM;
2733 goto err_ctx;
2734 }
2735
Robert Braggeec688e2016-11-07 19:49:47 +00002736 stream->dev_priv = dev_priv;
2737 stream->ctx = specific_ctx;
2738
Robert Braggd7965152016-11-07 19:49:52 +00002739 ret = i915_oa_stream_init(stream, param, props);
2740 if (ret)
2741 goto err_alloc;
2742
2743 /* we avoid simply assigning stream->sample_flags = props->sample_flags
2744 * to have _stream_init check the combination of sample flags more
2745 * thoroughly; still, this is the expected result at this point.
Robert Braggeec688e2016-11-07 19:49:47 +00002746 */
Robert Braggd7965152016-11-07 19:49:52 +00002747 if (WARN_ON(stream->sample_flags != props->sample_flags)) {
2748 ret = -ENODEV;
Matthew Auld22f880c2017-03-27 21:34:59 +01002749 goto err_flags;
Robert Braggd7965152016-11-07 19:49:52 +00002750 }
Robert Braggeec688e2016-11-07 19:49:47 +00002751
2752 list_add(&stream->link, &dev_priv->perf.streams);
2753
2754 if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
2755 f_flags |= O_CLOEXEC;
2756 if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
2757 f_flags |= O_NONBLOCK;
2758
2759 stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
2760 if (stream_fd < 0) {
2761 ret = stream_fd;
2762 goto err_open;
2763 }
2764
2765 if (!(param->flags & I915_PERF_FLAG_DISABLED))
2766 i915_perf_enable_locked(stream);
2767
Lionel Landwerlina5af1df2019-07-09 15:33:39 +03002768 /* Take a reference on the driver that will be kept with stream_fd
2769 * until its release.
2770 */
2771 drm_dev_get(&dev_priv->drm);
2772
Robert Braggeec688e2016-11-07 19:49:47 +00002773 return stream_fd;
2774
2775err_open:
2776 list_del(&stream->link);
Matthew Auld22f880c2017-03-27 21:34:59 +01002777err_flags:
Robert Braggeec688e2016-11-07 19:49:47 +00002778 if (stream->ops->destroy)
2779 stream->ops->destroy(stream);
2780err_alloc:
2781 kfree(stream);
2782err_ctx:
Chris Wilson69df05e2016-12-18 15:37:21 +00002783 if (specific_ctx)
Chris Wilson5f09a9c2017-06-20 12:05:46 +01002784 i915_gem_context_put(specific_ctx);
Robert Braggeec688e2016-11-07 19:49:47 +00002785err:
2786 return ret;
2787}
2788
Robert Bragg155e9412017-06-13 12:23:05 +01002789static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
2790{
Lionel Landwerlin9f9b2792017-10-27 15:59:31 +01002791 return div64_u64(1000000000ULL * (2ULL << exponent),
Jani Nikula02584042018-12-31 16:56:41 +02002792 1000ULL * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
Robert Bragg155e9412017-06-13 12:23:05 +01002793}
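/*
 * Worked example of the conversion above: the OA unit samples every
 * (2 << exponent), i.e. 2^(exponent + 1), timestamp periods. Assuming a
 * 12000kHz (12MHz) cs_timestamp_frequency_khz, typical of BDW/SKL,
 * exponent == 0 gives 1e9 * 2 / 12e6 ~= 167ns; HSW's 12.5MHz clock
 * yields the 160ns figure quoted further below.
 */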
2794
Robert Bragg16d98b32016-12-07 21:40:33 +00002795/**
2796 * read_properties_unlocked - validate + copy userspace stream open properties
2797 * @dev_priv: i915 device instance
2798 * @uprops: The array of u64 key value pairs given by userspace
2799 * @n_props: The number of key value pairs expected in @uprops
2800 * @props: The stream configuration built up while validating properties
Robert Braggeec688e2016-11-07 19:49:47 +00002801 *
2802 * Note this function only validates properties in isolation; it doesn't
2803 * validate that the combination of properties makes sense or that all
2804 * properties necessary for a particular kind of stream have been set.
Robert Bragg16d98b32016-12-07 21:40:33 +00002805 *
2806 * Note that there currently aren't any ordering requirements for properties so
2807 * we shouldn't validate or assume anything about ordering here. This doesn't
2808 * rule out defining new properties with ordering requirements in the future.
Robert Braggeec688e2016-11-07 19:49:47 +00002809 */
2810static int read_properties_unlocked(struct drm_i915_private *dev_priv,
2811 u64 __user *uprops,
2812 u32 n_props,
2813 struct perf_open_properties *props)
2814{
2815 u64 __user *uprop = uprops;
Lionel Landwerlin701f8232017-08-03 17:58:08 +01002816 u32 i;
Robert Braggeec688e2016-11-07 19:49:47 +00002817
2818 memset(props, 0, sizeof(struct perf_open_properties));
2819
2820 if (!n_props) {
Robert Bragg77085502016-12-01 17:21:52 +00002821 DRM_DEBUG("No i915 perf properties given\n");
Robert Braggeec688e2016-11-07 19:49:47 +00002822 return -EINVAL;
2823 }
2824
2825 /* Considering that ID = 0 is reserved and assuming that we don't
2826 * (currently) expect any configurations to ever specify duplicate
2827 * values for a particular property ID then the last _PROP_MAX value is
2828 * one greater than the maximum number of properties we expect to get
2829 * from userspace.
2830 */
2831 if (n_props >= DRM_I915_PERF_PROP_MAX) {
Robert Bragg77085502016-12-01 17:21:52 +00002832 DRM_DEBUG("More i915 perf properties specified than exist\n");
Robert Braggeec688e2016-11-07 19:49:47 +00002833 return -EINVAL;
2834 }
2835
2836 for (i = 0; i < n_props; i++) {
Robert Bragg00319ba2016-11-07 19:49:55 +00002837 u64 oa_period, oa_freq_hz;
Robert Braggeec688e2016-11-07 19:49:47 +00002838 u64 id, value;
2839 int ret;
2840
2841 ret = get_user(id, uprop);
2842 if (ret)
2843 return ret;
2844
2845 ret = get_user(value, uprop + 1);
2846 if (ret)
2847 return ret;
2848
Matthew Auld0a309f92017-03-27 21:32:36 +01002849 if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
2850 DRM_DEBUG("Unknown i915 perf property ID\n");
2851 return -EINVAL;
2852 }
2853
Robert Braggeec688e2016-11-07 19:49:47 +00002854 switch ((enum drm_i915_perf_property_id)id) {
2855 case DRM_I915_PERF_PROP_CTX_HANDLE:
2856 props->single_context = 1;
2857 props->ctx_handle = value;
2858 break;
Robert Braggd7965152016-11-07 19:49:52 +00002859 case DRM_I915_PERF_PROP_SAMPLE_OA:
Lionel Landwerlinb6dd47b2018-03-26 10:08:22 +01002860 if (value)
2861 props->sample_flags |= SAMPLE_OA_REPORT;
Robert Braggd7965152016-11-07 19:49:52 +00002862 break;
2863 case DRM_I915_PERF_PROP_OA_METRICS_SET:
Lionel Landwerlin701f8232017-08-03 17:58:08 +01002864 if (value == 0) {
Robert Bragg77085502016-12-01 17:21:52 +00002865 DRM_DEBUG("Unknown OA metric set ID\n");
Robert Braggd7965152016-11-07 19:49:52 +00002866 return -EINVAL;
2867 }
2868 props->metrics_set = value;
2869 break;
2870 case DRM_I915_PERF_PROP_OA_FORMAT:
2871 if (value == 0 || value >= I915_OA_FORMAT_MAX) {
Robert Bragg52c57c22017-05-11 16:43:29 +01002872 DRM_DEBUG("Out-of-range OA report format %llu\n",
2873 value);
Robert Braggd7965152016-11-07 19:49:52 +00002874 return -EINVAL;
2875 }
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07002876 if (!dev_priv->perf.oa_formats[value].size) {
Robert Bragg52c57c22017-05-11 16:43:29 +01002877 DRM_DEBUG("Unsupported OA report format %llu\n",
2878 value);
Robert Braggd7965152016-11-07 19:49:52 +00002879 return -EINVAL;
2880 }
2881 props->oa_format = value;
2882 break;
2883 case DRM_I915_PERF_PROP_OA_EXPONENT:
2884 if (value > OA_EXPONENT_MAX) {
Robert Bragg77085502016-12-01 17:21:52 +00002885 DRM_DEBUG("OA timer exponent too high (> %u)\n",
2886 OA_EXPONENT_MAX);
Robert Braggd7965152016-11-07 19:49:52 +00002887 return -EINVAL;
2888 }
2889
Robert Bragg00319ba2016-11-07 19:49:55 +00002890 /* Theoretically we can program the OA unit to sample
Robert Bragg155e9412017-06-13 12:23:05 +01002891 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns
2892 * for BXT. We don't allow such high sampling
2893 * frequencies by default unless the user is root.
Robert Braggd7965152016-11-07 19:49:52 +00002894 */
Robert Bragg155e9412017-06-13 12:23:05 +01002895
Robert Bragg00319ba2016-11-07 19:49:55 +00002896 BUILD_BUG_ON(sizeof(oa_period) != 8);
Robert Bragg155e9412017-06-13 12:23:05 +01002897 oa_period = oa_exponent_to_ns(dev_priv, value);
Robert Bragg00319ba2016-11-07 19:49:55 +00002898
2899 /* This check is primarily to ensure that oa_period <=
2900 * UINT32_MAX (before passing to do_div which only
2901 * accepts a u32 denominator), but we can also skip
2902 * checking anything < 1Hz which implicitly can't be
2903 * limited via an integer oa_max_sample_rate.
2904 */
2905 if (oa_period <= NSEC_PER_SEC) {
2906 u64 tmp = NSEC_PER_SEC;
2907 do_div(tmp, oa_period);
2908 oa_freq_hz = tmp;
2909 } else
2910 oa_freq_hz = 0;
2911
2912 if (oa_freq_hz > i915_oa_max_sample_rate &&
2913 !capable(CAP_SYS_ADMIN)) {
Robert Bragg77085502016-12-01 17:21:52 +00002914 DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without root privileges\n",
Robert Bragg00319ba2016-11-07 19:49:55 +00002915 i915_oa_max_sample_rate);
Robert Braggd7965152016-11-07 19:49:52 +00002916 return -EACCES;
2917 }
2918
2919 props->oa_periodic = true;
2920 props->oa_period_exponent = value;
2921 break;
Matthew Auld0a309f92017-03-27 21:32:36 +01002922 case DRM_I915_PERF_PROP_MAX:
Robert Braggeec688e2016-11-07 19:49:47 +00002923 MISSING_CASE(id);
Robert Braggeec688e2016-11-07 19:49:47 +00002924 return -EINVAL;
2925 }
2926
2927 uprop += 2;
2928 }
2929
2930 return 0;
2931}
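/*
 * The expected userspace layout is a flat array of (key, value) u64
 * pairs; a sketch, where the metrics set id and chosen format are
 * illustrative only:
 *
 *	uint64_t properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 */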
2932
Robert Bragg16d98b32016-12-07 21:40:33 +00002933/**
2934 * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD
2935 * @dev: drm device
2936 * @data: ioctl data copied from userspace (unvalidated)
2937 * @file: drm file
2938 *
2939 * Validates the stream open parameters given by userspace including flags
2940 * and an array of u64 key, value pair properties.
2941 *
2942 * Very little is assumed up front about the nature of the stream being
2943 * opened (for instance we don't assume it's for periodic OA unit metrics). An
2944 * i915-perf stream is expected to be a suitable interface for other forms of
2945 * buffered data written by the GPU besides periodic OA metrics.
2946 *
2947 * Note we copy the properties from userspace outside of the i915 perf
2948 * mutex to avoid an awkward lockdep dependency with mmap_sem.
2949 *
2950 * Most of the implementation details are handled by
2951 * i915_perf_open_ioctl_locked() after taking the &drm_i915_private->perf.lock
2952 * mutex for serializing with any non-file-operation driver hooks.
2953 *
2954 * Return: A newly opened i915 Perf stream file descriptor or negative
2955 * error code on failure.
2956 */
Robert Braggeec688e2016-11-07 19:49:47 +00002957int i915_perf_open_ioctl(struct drm_device *dev, void *data,
2958 struct drm_file *file)
2959{
2960 struct drm_i915_private *dev_priv = dev->dev_private;
2961 struct drm_i915_perf_open_param *param = data;
2962 struct perf_open_properties props;
2963 u32 known_open_flags;
2964 int ret;
2965
2966 if (!dev_priv->perf.initialized) {
Robert Bragg77085502016-12-01 17:21:52 +00002967 DRM_DEBUG("i915 perf interface not available for this system\n");
Robert Braggeec688e2016-11-07 19:49:47 +00002968 return -ENOTSUPP;
2969 }
2970
2971 known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
2972 I915_PERF_FLAG_FD_NONBLOCK |
2973 I915_PERF_FLAG_DISABLED;
2974 if (param->flags & ~known_open_flags) {
Robert Bragg77085502016-12-01 17:21:52 +00002975 DRM_DEBUG("Unknown drm_i915_perf_open_param flag\n");
Robert Braggeec688e2016-11-07 19:49:47 +00002976 return -EINVAL;
2977 }
2978
2979 ret = read_properties_unlocked(dev_priv,
2980 u64_to_user_ptr(param->properties_ptr),
2981 param->num_properties,
2982 &props);
2983 if (ret)
2984 return ret;
2985
2986 mutex_lock(&dev_priv->perf.lock);
2987 ret = i915_perf_open_ioctl_locked(dev_priv, param, &props, file);
2988 mutex_unlock(&dev_priv->perf.lock);
2989
2990 return ret;
2991}
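/*
 * A hedged end-to-end sketch of opening a stream with a properties array
 * like the one sketched after read_properties_unlocked(); error handling
 * elided and `drm_fd` assumed to be an open i915 DRM fd:
 *
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */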
2992
Robert Bragg16d98b32016-12-07 21:40:33 +00002993/**
2994 * i915_perf_register - exposes i915-perf to userspace
2995 * @dev_priv: i915 device instance
2996 *
2997 * In particular OA metric sets are advertised under a sysfs metrics/
2998 * directory allowing userspace to enumerate valid IDs that can be
2999 * used to open an i915-perf stream.
3000 */
Robert Bragg442b8c02016-11-07 19:49:53 +00003001void i915_perf_register(struct drm_i915_private *dev_priv)
3002{
Lionel Landwerlin701f8232017-08-03 17:58:08 +01003003 int ret;
3004
Robert Bragg442b8c02016-11-07 19:49:53 +00003005 if (!dev_priv->perf.initialized)
3006 return;
3007
3008 /* To be sure we're synchronized with an attempted
3009 * i915_perf_open_ioctl(), considering that we register after
3010 * the driver is already exposed to userspace.
3011 */
3012 mutex_lock(&dev_priv->perf.lock);
3013
3014 dev_priv->perf.metrics_kobj =
3015 kobject_create_and_add("metrics",
3016 &dev_priv->drm.primary->kdev->kobj);
3017 if (!dev_priv->perf.metrics_kobj)
3018 goto exit;
3019
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07003020 sysfs_attr_init(&dev_priv->perf.test_config.sysfs_metric_id.attr);
Lionel Landwerlin701f8232017-08-03 17:58:08 +01003021
Rodrigo Vivi2dd24a92019-03-08 13:42:58 -08003022 if (INTEL_GEN(dev_priv) >= 11) {
Rodrigo Vivi993298a2019-03-01 09:27:03 -08003023 i915_perf_load_test_config_icl(dev_priv);
3024 } else if (IS_CANNONLAKE(dev_priv)) {
3025 i915_perf_load_test_config_cnl(dev_priv);
3026 } else if (IS_COFFEELAKE(dev_priv)) {
3027 if (IS_CFL_GT2(dev_priv))
3028 i915_perf_load_test_config_cflgt2(dev_priv);
3029 if (IS_CFL_GT3(dev_priv))
3030 i915_perf_load_test_config_cflgt3(dev_priv);
3031 } else if (IS_GEMINILAKE(dev_priv)) {
3032 i915_perf_load_test_config_glk(dev_priv);
3033 } else if (IS_KABYLAKE(dev_priv)) {
3034 if (IS_KBL_GT2(dev_priv))
3035 i915_perf_load_test_config_kblgt2(dev_priv);
3036 else if (IS_KBL_GT3(dev_priv))
3037 i915_perf_load_test_config_kblgt3(dev_priv);
3038 } else if (IS_BROXTON(dev_priv)) {
3039 i915_perf_load_test_config_bxt(dev_priv);
Robert Bragg19f81df2017-06-13 12:23:03 +01003040 } else if (IS_SKYLAKE(dev_priv)) {
Lionel Landwerlin701f8232017-08-03 17:58:08 +01003041 if (IS_SKL_GT2(dev_priv))
3042 i915_perf_load_test_config_sklgt2(dev_priv);
3043 else if (IS_SKL_GT3(dev_priv))
3044 i915_perf_load_test_config_sklgt3(dev_priv);
3045 else if (IS_SKL_GT4(dev_priv))
3046 i915_perf_load_test_config_sklgt4(dev_priv);
Rodrigo Vivi993298a2019-03-01 09:27:03 -08003047 } else if (IS_CHERRYVIEW(dev_priv)) {
3048 i915_perf_load_test_config_chv(dev_priv);
3049 } else if (IS_BROADWELL(dev_priv)) {
3050 i915_perf_load_test_config_bdw(dev_priv);
3051 } else if (IS_HASWELL(dev_priv)) {
3052 i915_perf_load_test_config_hsw(dev_priv);
3053}
Robert Bragg442b8c02016-11-07 19:49:53 +00003054
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07003055 if (dev_priv->perf.test_config.id == 0)
Lionel Landwerlin701f8232017-08-03 17:58:08 +01003056 goto sysfs_error;
3057
3058 ret = sysfs_create_group(dev_priv->perf.metrics_kobj,
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07003059 &dev_priv->perf.test_config.sysfs_metric);
Lionel Landwerlin701f8232017-08-03 17:58:08 +01003060 if (ret)
3061 goto sysfs_error;
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01003062
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07003063 atomic_set(&dev_priv->perf.test_config.ref_count, 1);
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01003064
Robert Bragg19f81df2017-06-13 12:23:03 +01003065 goto exit;
3066
3067sysfs_error:
3068 kobject_put(dev_priv->perf.metrics_kobj);
3069 dev_priv->perf.metrics_kobj = NULL;
3070
Robert Bragg442b8c02016-11-07 19:49:53 +00003071exit:
3072 mutex_unlock(&dev_priv->perf.lock);
3073}
3074
Robert Bragg16d98b32016-12-07 21:40:33 +00003075/**
3076 * i915_perf_unregister - hide i915-perf from userspace
3077 * @dev_priv: i915 device instance
3078 *
3079 * i915-perf state cleanup is split up into an 'unregister' and
3080 * 'deinit' phase where the interface is first hidden from
3081 * userspace by i915_perf_unregister() before cleaning up
3082 * remaining state in i915_perf_fini().
3083 */
Robert Bragg442b8c02016-11-07 19:49:53 +00003084void i915_perf_unregister(struct drm_i915_private *dev_priv)
3085{
Robert Bragg442b8c02016-11-07 19:49:53 +00003086 if (!dev_priv->perf.metrics_kobj)
3087 return;
3088
Lionel Landwerlin701f8232017-08-03 17:58:08 +01003089 sysfs_remove_group(dev_priv->perf.metrics_kobj,
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07003090 &dev_priv->perf.test_config.sysfs_metric);
Robert Bragg442b8c02016-11-07 19:49:53 +00003091
3092 kobject_put(dev_priv->perf.metrics_kobj);
3093 dev_priv->perf.metrics_kobj = NULL;
3094}
3095
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01003096static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr)
3097{
3098 static const i915_reg_t flex_eu_regs[] = {
3099 EU_PERF_CNTL0,
3100 EU_PERF_CNTL1,
3101 EU_PERF_CNTL2,
3102 EU_PERF_CNTL3,
3103 EU_PERF_CNTL4,
3104 EU_PERF_CNTL5,
3105 EU_PERF_CNTL6,
3106 };
3107 int i;
3108
3109 for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
Lionel Landwerlin7c52a222017-11-13 23:34:52 +00003110 if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr)
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01003111 return true;
3112 }
3113 return false;
3114}
3115
3116static bool gen7_is_valid_b_counter_addr(struct drm_i915_private *dev_priv, u32 addr)
3117{
Lionel Landwerlin7c52a222017-11-13 23:34:52 +00003118 return (addr >= i915_mmio_reg_offset(OASTARTTRIG1) &&
3119 addr <= i915_mmio_reg_offset(OASTARTTRIG8)) ||
3120 (addr >= i915_mmio_reg_offset(OAREPORTTRIG1) &&
3121 addr <= i915_mmio_reg_offset(OAREPORTTRIG8)) ||
3122 (addr >= i915_mmio_reg_offset(OACEC0_0) &&
3123 addr <= i915_mmio_reg_offset(OACEC7_1));
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01003124}
3125
3126static bool gen7_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
3127{
Lionel Landwerlin7c52a222017-11-13 23:34:52 +00003128 return addr == i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) ||
3129 (addr >= i915_mmio_reg_offset(MICRO_BP0_0) &&
3130 addr <= i915_mmio_reg_offset(NOA_WRITE)) ||
3131 (addr >= i915_mmio_reg_offset(OA_PERFCNT1_LO) &&
3132 addr <= i915_mmio_reg_offset(OA_PERFCNT2_HI)) ||
3133 (addr >= i915_mmio_reg_offset(OA_PERFMATRIX_LO) &&
3134 addr <= i915_mmio_reg_offset(OA_PERFMATRIX_HI));
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01003135}
3136
3137static bool gen8_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
3138{
3139 return gen7_is_valid_mux_addr(dev_priv, addr) ||
Lionel Landwerlin7c52a222017-11-13 23:34:52 +00003140 addr == i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) ||
3141 (addr >= i915_mmio_reg_offset(RPM_CONFIG0) &&
3142 addr <= i915_mmio_reg_offset(NOA_CONFIG(8)));
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01003143}
3144
Lionel Landwerlin95690a02017-11-10 19:08:43 +00003145static bool gen10_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
3146{
3147 return gen8_is_valid_mux_addr(dev_priv, addr) ||
Lionel Landwerlinbf210f62019-06-02 01:58:45 +03003148 addr == i915_mmio_reg_offset(GEN10_NOA_WRITE_HIGH) ||
Lionel Landwerlin7c52a222017-11-13 23:34:52 +00003149 (addr >= i915_mmio_reg_offset(OA_PERFCNT3_LO) &&
3150 addr <= i915_mmio_reg_offset(OA_PERFCNT4_HI));
Lionel Landwerlin95690a02017-11-10 19:08:43 +00003151}
3152
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01003153static bool hsw_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
3154{
3155 return gen7_is_valid_mux_addr(dev_priv, addr) ||
3156 (addr >= 0x25100 && addr <= 0x2FF90) ||
Lionel Landwerlin7c52a222017-11-13 23:34:52 +00003157 (addr >= i915_mmio_reg_offset(HSW_MBVID2_NOA0) &&
3158 addr <= i915_mmio_reg_offset(HSW_MBVID2_NOA9)) ||
3159 addr == i915_mmio_reg_offset(HSW_MBVID2_MISR0);
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01003160}
3161
3162static bool chv_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
3163{
3164 return gen7_is_valid_mux_addr(dev_priv, addr) ||
3165 (addr >= 0x182300 && addr <= 0x1823A4);
3166}
3167
Jani Nikula739f3ab2019-01-16 11:15:19 +02003168static u32 mask_reg_value(u32 reg, u32 val)
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01003169{
3170 /* HALF_SLICE_CHICKEN2 is programmed with the
3171 * WaDisableSTUnitPowerOptimization workaround. Make sure the value
3172 * programmed by userspace doesn't change this.
3173 */
Lionel Landwerlin7c52a222017-11-13 23:34:52 +00003174 if (i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) == reg)
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01003175 val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
3176
3177 /* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
3178 * indicated by its name and a bunch of selection fields used by OA
3179 * configs.
3180 */
Lionel Landwerlin7c52a222017-11-13 23:34:52 +00003181 if (i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) == reg)
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01003182 val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
3183
3184 return val;
3185}
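/*
 * For reference, masked registers carry their write mask in the upper 16
 * bits, so _MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE) expands to
 * (GEN8_ST_PO_DISABLE << 16) | GEN8_ST_PO_DISABLE; clearing those bits
 * above stops userspace from toggling the workaround bit while leaving
 * the rest of its value intact.
 */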
3186
3187static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *dev_priv,
3188 bool (*is_valid)(struct drm_i915_private *dev_priv, u32 addr),
3189 u32 __user *regs,
3190 u32 n_regs)
3191{
3192 struct i915_oa_reg *oa_regs;
3193 int err;
3194 u32 i;
3195
3196 if (!n_regs)
3197 return NULL;
3198
Linus Torvalds96d4f262019-01-03 18:57:57 -08003199 if (!access_ok(regs, n_regs * sizeof(u32) * 2))
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01003200 return ERR_PTR(-EFAULT);
3201
3202 /* No is_valid function means we're not allowing any register to be programmed. */
3203 GEM_BUG_ON(!is_valid);
3204 if (!is_valid)
3205 return ERR_PTR(-EINVAL);
3206
3207 oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL);
3208 if (!oa_regs)
3209 return ERR_PTR(-ENOMEM);
3210
3211 for (i = 0; i < n_regs; i++) {
3212 u32 addr, value;
3213
3214 err = get_user(addr, regs);
3215 if (err)
3216 goto addr_err;
3217
3218 if (!is_valid(dev_priv, addr)) {
3219 DRM_DEBUG("Invalid oa_reg address: %X\n", addr);
3220 err = -EINVAL;
3221 goto addr_err;
3222 }
3223
3224 err = get_user(value, regs + 1);
3225 if (err)
3226 goto addr_err;
3227
3228 oa_regs[i].addr = _MMIO(addr);
3229 oa_regs[i].value = mask_reg_value(addr, value);
3230
3231 regs += 2;
3232 }
3233
3234 return oa_regs;
3235
3236addr_err:
3237 kfree(oa_regs);
3238 return ERR_PTR(err);
3239}
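/*
 * Like the open properties, each register list is a flat array of
 * (address, value) u32 pairs in userspace memory; a sketch with a
 * hypothetical mux write (the value is made up for illustration):
 *
 *	uint32_t mux_regs[] = {
 *		0x9888, 0x14810001, // NOA_WRITE, illustrative value
 *	};
 *	// n_regs = 1, regs = (uintptr_t)mux_regs
 */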
3240
3241static ssize_t show_dynamic_id(struct device *dev,
3242 struct device_attribute *attr,
3243 char *buf)
3244{
3245 struct i915_oa_config *oa_config =
3246 container_of(attr, typeof(*oa_config), sysfs_metric_id);
3247
3248 return sprintf(buf, "%d\n", oa_config->id);
3249}
3250
3251static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv,
3252 struct i915_oa_config *oa_config)
3253{
Chris Wilson28152a22017-08-03 23:37:00 +01003254 sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01003255 oa_config->sysfs_metric_id.attr.name = "id";
3256 oa_config->sysfs_metric_id.attr.mode = S_IRUGO;
3257 oa_config->sysfs_metric_id.show = show_dynamic_id;
3258 oa_config->sysfs_metric_id.store = NULL;
3259
3260 oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr;
3261 oa_config->attrs[1] = NULL;
3262
3263 oa_config->sysfs_metric.name = oa_config->uuid;
3264 oa_config->sysfs_metric.attrs = oa_config->attrs;
3265
3266 return sysfs_create_group(dev_priv->perf.metrics_kobj,
3267 &oa_config->sysfs_metric);
3268}
3269
3270/**
3271 * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config
3272 * @dev: drm device
3273 * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from
3274 * userspace (unvalidated)
3275 * @file: drm file
3276 *
3277 * Validates the submitted OA register to be saved into a new OA config that
3278 * can then be used for programming the OA unit and its NOA network.
3279 *
3280 * Returns: A new allocated config number to be used with the perf open ioctl
3281 * or a negative error code on failure.
3282 */
3283int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
3284 struct drm_file *file)
3285{
3286 struct drm_i915_private *dev_priv = dev->dev_private;
3287 struct drm_i915_perf_oa_config *args = data;
3288 struct i915_oa_config *oa_config, *tmp;
3289 int err, id;
3290
3291 if (!dev_priv->perf.initialized) {
3292 DRM_DEBUG("i915 perf interface not available for this system\n");
3293 return -ENOTSUPP;
3294 }
3295
3296 if (!dev_priv->perf.metrics_kobj) {
3297 DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
3298 return -EINVAL;
3299 }
3300
3301 if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
3302 DRM_DEBUG("Insufficient privileges to add i915 OA config\n");
3303 return -EACCES;
3304 }
3305
3306 if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
3307 (!args->boolean_regs_ptr || !args->n_boolean_regs) &&
3308 (!args->flex_regs_ptr || !args->n_flex_regs)) {
3309 DRM_DEBUG("No OA registers given\n");
3310 return -EINVAL;
3311 }
3312
3313 oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
3314 if (!oa_config) {
3315 DRM_DEBUG("Failed to allocate memory for the OA config\n");
3316 return -ENOMEM;
3317 }
3318
3319 atomic_set(&oa_config->ref_count, 1);
3320
3321 if (!uuid_is_valid(args->uuid)) {
3322 DRM_DEBUG("Invalid uuid format for OA config\n");
3323 err = -EINVAL;
3324 goto reg_err;
3325 }
3326
3327 /* Last character in oa_config->uuid will be 0 because oa_config was
3328 * allocated with kzalloc.
3329 */
3330 memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));
3331
3332 oa_config->mux_regs_len = args->n_mux_regs;
3333 oa_config->mux_regs =
3334 alloc_oa_regs(dev_priv,
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07003335 dev_priv->perf.ops.is_valid_mux_reg,
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01003336 u64_to_user_ptr(args->mux_regs_ptr),
3337 args->n_mux_regs);
3338
3339 if (IS_ERR(oa_config->mux_regs)) {
3340 DRM_DEBUG("Failed to create OA config for mux_regs\n");
3341 err = PTR_ERR(oa_config->mux_regs);
3342 goto reg_err;
3343 }
3344
3345 oa_config->b_counter_regs_len = args->n_boolean_regs;
3346 oa_config->b_counter_regs =
3347 alloc_oa_regs(dev_priv,
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07003348 dev_priv->perf.ops.is_valid_b_counter_reg,
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01003349 u64_to_user_ptr(args->boolean_regs_ptr),
3350 args->n_boolean_regs);
3351
3352 if (IS_ERR(oa_config->b_counter_regs)) {
3353 DRM_DEBUG("Failed to create OA config for b_counter_regs\n");
3354 err = PTR_ERR(oa_config->b_counter_regs);
3355 goto reg_err;
3356 }
3357
3358 if (INTEL_GEN(dev_priv) < 8) {
3359 if (args->n_flex_regs != 0) {
3360 err = -EINVAL;
3361 goto reg_err;
3362 }
3363 } else {
3364 oa_config->flex_regs_len = args->n_flex_regs;
3365 oa_config->flex_regs =
3366 alloc_oa_regs(dev_priv,
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07003367 dev_priv->perf.ops.is_valid_flex_reg,
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01003368 u64_to_user_ptr(args->flex_regs_ptr),
3369 args->n_flex_regs);
3370
3371 if (IS_ERR(oa_config->flex_regs)) {
3372 DRM_DEBUG("Failed to create OA config for flex_regs\n");
3373 err = PTR_ERR(oa_config->flex_regs);
3374 goto reg_err;
3375 }
3376 }
3377
3378 err = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
3379 if (err)
3380 goto reg_err;
3381
3382 /* We shouldn't have too many configs, so this iteration shouldn't be
3383 * too costly.
3384 */
3385 idr_for_each_entry(&dev_priv->perf.metrics_idr, tmp, id) {
3386 if (!strcmp(tmp->uuid, oa_config->uuid)) {
3387 DRM_DEBUG("OA config already exists with this uuid\n");
3388 err = -EADDRINUSE;
3389 goto sysfs_err;
3390 }
3391 }
3392
3393 err = create_dynamic_oa_sysfs_entry(dev_priv, oa_config);
3394 if (err) {
3395 DRM_DEBUG("Failed to create sysfs entry for OA config\n");
3396 goto sysfs_err;
3397 }
3398
3399 /* Config id 0 is invalid, id 1 is reserved for the kernel-stored test config. */
3400 oa_config->id = idr_alloc(&dev_priv->perf.metrics_idr,
3401 oa_config, 2,
3402 0, GFP_KERNEL);
3403 if (oa_config->id < 0) {
3404 DRM_DEBUG("Failed to create sysfs entry for OA config\n");
3405 err = oa_config->id;
3406 goto sysfs_err;
3407 }
3408
3409 mutex_unlock(&dev_priv->perf.metrics_lock);
3410
Lionel Landwerlin9bd9be62018-03-26 10:08:28 +01003411 DRM_DEBUG("Added config %s id=%i\n", oa_config->uuid, oa_config->id);
3412
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01003413 return oa_config->id;
3414
3415sysfs_err:
3416 mutex_unlock(&dev_priv->perf.metrics_lock);
3417reg_err:
3418 put_oa_config(dev_priv, oa_config);
3419 DRM_DEBUG("Failed to add new OA config\n");
3420 return err;
3421}
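/*
 * Minimal usage sketch; the uuid and register list are placeholders:
 *
 *	struct drm_i915_perf_oa_config config = {
 *		.n_mux_regs = 1,
 *		.mux_regs_ptr = (uintptr_t)mux_regs,
 *	};
 *	memcpy(config.uuid, "01234567-89ab-cdef-0123-456789abcdef",
 *	       sizeof(config.uuid));
 *	int id = ioctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
 *	// on success, pass `id` as DRM_I915_PERF_PROP_OA_METRICS_SET
 */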
3422
3423/**
3424 * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config
3425 * @dev: drm device
3426 * @data: ioctl data (pointer to u64 integer) copied from userspace
3427 * @file: drm file
3428 *
3429 * Configs can be removed while being used; they will stop appearing in sysfs
3430 * and their content will be freed when the stream using the config is closed.
3431 *
3432 * Returns: 0 on success or a negative error code on failure.
3433 */
3434int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
3435 struct drm_file *file)
3436{
3437 struct drm_i915_private *dev_priv = dev->dev_private;
3438 u64 *arg = data;
3439 struct i915_oa_config *oa_config;
3440 int ret;
3441
3442 if (!dev_priv->perf.initialized) {
3443 DRM_DEBUG("i915 perf interface not available for this system\n");
3444 return -ENOTSUPP;
3445 }
3446
3447 if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
3448 DRM_DEBUG("Insufficient privileges to remove i915 OA config\n");
3449 return -EACCES;
3450 }
3451
3452 ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
3453 if (ret)
3454 goto lock_err;
3455
3456 oa_config = idr_find(&dev_priv->perf.metrics_idr, *arg);
3457 if (!oa_config) {
3458 DRM_DEBUG("Failed to remove unknown OA config\n");
3459 ret = -ENOENT;
3460 goto config_err;
3461 }
3462
3463 GEM_BUG_ON(*arg != oa_config->id);
3464
3465 sysfs_remove_group(dev_priv->perf.metrics_kobj,
3466 &oa_config->sysfs_metric);
3467
3468 idr_remove(&dev_priv->perf.metrics_idr, *arg);
Lionel Landwerlin9bd9be62018-03-26 10:08:28 +01003469
3470 DRM_DEBUG("Removed config %s id=%i\n", oa_config->uuid, oa_config->id);
3471
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01003472 put_oa_config(dev_priv, oa_config);
3473
3474config_err:
3475 mutex_unlock(&dev_priv->perf.metrics_lock);
3476lock_err:
3477 return ret;
3478}
3479
Robert Braggccdf6342016-11-07 19:49:54 +00003480static struct ctl_table oa_table[] = {
3481 {
3482 .procname = "perf_stream_paranoid",
3483 .data = &i915_perf_stream_paranoid,
3484 .maxlen = sizeof(i915_perf_stream_paranoid),
3485 .mode = 0644,
3486 .proc_handler = proc_dointvec_minmax,
Matteo Croceeec48442019-07-18 15:58:50 -07003487 .extra1 = SYSCTL_ZERO,
3488 .extra2 = SYSCTL_ONE,
Robert Braggccdf6342016-11-07 19:49:54 +00003489 },
Robert Bragg00319ba2016-11-07 19:49:55 +00003490 {
3491 .procname = "oa_max_sample_rate",
3492 .data = &i915_oa_max_sample_rate,
3493 .maxlen = sizeof(i915_oa_max_sample_rate),
3494 .mode = 0644,
3495 .proc_handler = proc_dointvec_minmax,
Matteo Croceeec48442019-07-18 15:58:50 -07003496 .extra1 = SYSCTL_ZERO,
Robert Bragg00319ba2016-11-07 19:49:55 +00003497 .extra2 = &oa_sample_rate_hard_limit,
3498 },
Robert Braggccdf6342016-11-07 19:49:54 +00003499 {}
3500};
3501
3502static struct ctl_table i915_root[] = {
3503 {
3504 .procname = "i915",
3505 .maxlen = 0,
3506 .mode = 0555,
3507 .child = oa_table,
3508 },
3509 {}
3510};
3511
3512static struct ctl_table dev_root[] = {
3513 {
3514 .procname = "dev",
3515 .maxlen = 0,
3516 .mode = 0555,
3517 .child = i915_root,
3518 },
3519 {}
3520};
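/*
 * These tables surface as /proc/sys/dev/i915/perf_stream_paranoid and
 * /proc/sys/dev/i915/oa_max_sample_rate, so an administrator can for
 * example write 0 to the former to let unprivileged users open
 * system-wide streams.
 */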
3521
Robert Bragg16d98b32016-12-07 21:40:33 +00003522/**
3523 * i915_perf_init - initialize i915-perf state on module load
3524 * @dev_priv: i915 device instance
3525 *
3526 * Initializes i915-perf state without exposing anything to userspace.
3527 *
3528 * Note: i915-perf initialization is split into an 'init' and 'register'
3529 * phase with the i915_perf_register() exposing state to userspace.
3530 */
Robert Braggeec688e2016-11-07 19:49:47 +00003531void i915_perf_init(struct drm_i915_private *dev_priv)
3532{
Robert Bragg19f81df2017-06-13 12:23:03 +01003533 if (IS_HASWELL(dev_priv)) {
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07003534 dev_priv->perf.ops.is_valid_b_counter_reg =
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01003535 gen7_is_valid_b_counter_addr;
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07003536 dev_priv->perf.ops.is_valid_mux_reg =
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01003537 hsw_is_valid_mux_addr;
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07003538 dev_priv->perf.ops.is_valid_flex_reg = NULL;
3539 dev_priv->perf.ops.enable_metric_set = hsw_enable_metric_set;
3540 dev_priv->perf.ops.disable_metric_set = hsw_disable_metric_set;
3541 dev_priv->perf.ops.oa_enable = gen7_oa_enable;
3542 dev_priv->perf.ops.oa_disable = gen7_oa_disable;
3543 dev_priv->perf.ops.read = gen7_oa_read;
3544 dev_priv->perf.ops.oa_hw_tail_read =
Robert Bragg19f81df2017-06-13 12:23:03 +01003545 gen7_oa_hw_tail_read;
Robert Braggd7965152016-11-07 19:49:52 +00003546
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07003547 dev_priv->perf.oa_formats = hsw_oa_formats;
Chris Wilsonfb5c5512017-11-20 20:55:00 +00003548 } else if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
Robert Bragg19f81df2017-06-13 12:23:03 +01003549 /* Note that although we could theoretically also support the
3550 * legacy ringbuffer mode on BDW (and earlier iterations of
3551 * this driver, before upstreaming, did this) it didn't seem
3552 * worth the complexity to maintain now that BDW+ enable
3553 * execlist mode by default.
3554 */
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07003555 dev_priv->perf.oa_formats = gen8_plus_oa_formats;
Robert Braggd7965152016-11-07 19:49:52 +00003556
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07003557 dev_priv->perf.ops.oa_enable = gen8_oa_enable;
3558 dev_priv->perf.ops.oa_disable = gen8_oa_disable;
3559 dev_priv->perf.ops.read = gen8_oa_read;
3560 dev_priv->perf.ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
Lionel Landwerlin701f8232017-08-03 17:58:08 +01003561
Lucas De Marchif3ce44a2018-12-12 10:10:44 -08003562 if (IS_GEN_RANGE(dev_priv, 8, 9)) {
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07003563 dev_priv->perf.ops.is_valid_b_counter_reg =
Lionel Landwerlinba6b7c12017-11-10 19:08:41 +00003564 gen7_is_valid_b_counter_addr;
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07003565 dev_priv->perf.ops.is_valid_mux_reg =
Lionel Landwerlinba6b7c12017-11-10 19:08:41 +00003566 gen8_is_valid_mux_addr;
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07003567 dev_priv->perf.ops.is_valid_flex_reg =
Lionel Landwerlinba6b7c12017-11-10 19:08:41 +00003568 gen8_is_valid_flex_addr;
Lionel Landwerlin701f8232017-08-03 17:58:08 +01003569
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01003570 if (IS_CHERRYVIEW(dev_priv)) {
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07003571 dev_priv->perf.ops.is_valid_mux_reg =
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01003572 chv_is_valid_mux_addr;
3573 }
Robert Bragg155e9412017-06-13 12:23:05 +01003574
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07003575 dev_priv->perf.ops.enable_metric_set = gen8_enable_metric_set;
3576 dev_priv->perf.ops.disable_metric_set = gen8_disable_metric_set;
Lionel Landwerlinba6b7c12017-11-10 19:08:41 +00003577
Lucas De Marchicf819ef2018-12-12 10:10:43 -08003578 if (IS_GEN(dev_priv, 8)) {
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07003579 dev_priv->perf.ctx_oactxctrl_offset = 0x120;
3580 dev_priv->perf.ctx_flexeu0_offset = 0x2ce;
Lionel Landwerlinba6b7c12017-11-10 19:08:41 +00003581
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07003582 dev_priv->perf.gen8_valid_ctx_bit = BIT(25);
Lionel Landwerlinba6b7c12017-11-10 19:08:41 +00003583 } else {
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07003584 dev_priv->perf.ctx_oactxctrl_offset = 0x128;
3585 dev_priv->perf.ctx_flexeu0_offset = 0x3de;
Lionel Landwerlinba6b7c12017-11-10 19:08:41 +00003586
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07003587 dev_priv->perf.gen8_valid_ctx_bit = BIT(16);
Lionel Landwerlinba6b7c12017-11-10 19:08:41 +00003588 }
Lucas De Marchi00690002018-12-12 10:10:42 -08003589 } else if (IS_GEN_RANGE(dev_priv, 10, 11)) {
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07003590 dev_priv->perf.ops.is_valid_b_counter_reg =
Lionel Landwerlin95690a02017-11-10 19:08:43 +00003591 gen7_is_valid_b_counter_addr;
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07003592 dev_priv->perf.ops.is_valid_mux_reg =
Lionel Landwerlin95690a02017-11-10 19:08:43 +00003593 gen10_is_valid_mux_addr;
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07003594 dev_priv->perf.ops.is_valid_flex_reg =
Lionel Landwerlin95690a02017-11-10 19:08:43 +00003595 gen8_is_valid_flex_addr;
3596
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07003597 dev_priv->perf.ops.enable_metric_set = gen8_enable_metric_set;
3598 dev_priv->perf.ops.disable_metric_set = gen10_disable_metric_set;
Lionel Landwerlin95690a02017-11-10 19:08:43 +00003599
Lionel Landwerlin8dcfdfb2019-06-10 11:19:14 +03003600 if (IS_GEN(dev_priv, 10)) {
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07003601 dev_priv->perf.ctx_oactxctrl_offset = 0x128;
3602 dev_priv->perf.ctx_flexeu0_offset = 0x3de;
Lionel Landwerlin8dcfdfb2019-06-10 11:19:14 +03003603 } else {
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07003604 dev_priv->perf.ctx_oactxctrl_offset = 0x124;
3605 dev_priv->perf.ctx_flexeu0_offset = 0x78e;
Lionel Landwerlin8dcfdfb2019-06-10 11:19:14 +03003606 }
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07003607 dev_priv->perf.gen8_valid_ctx_bit = BIT(16);
Robert Bragg19f81df2017-06-13 12:23:03 +01003608 }
Robert Bragg19f81df2017-06-13 12:23:03 +01003609 }
3610
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07003611 if (dev_priv->perf.ops.enable_metric_set) {
Robert Bragg19f81df2017-06-13 12:23:03 +01003612 INIT_LIST_HEAD(&dev_priv->perf.streams);
3613 mutex_init(&dev_priv->perf.lock);
Robert Bragg19f81df2017-06-13 12:23:03 +01003614
Lionel Landwerlin9f9b2792017-10-27 15:59:31 +01003615 oa_sample_rate_hard_limit = 1000 *
Jani Nikula02584042018-12-31 16:56:41 +02003616 (RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
Robert Bragg19f81df2017-06-13 12:23:03 +01003617 dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);
3618
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01003619 mutex_init(&dev_priv->perf.metrics_lock);
3620 idr_init(&dev_priv->perf.metrics_idr);
3621
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07003622 /* We set up some ratelimit state to potentially throttle any
3623 * _NOTES about spurious, invalid OA reports which we don't
3624 * forward to userspace.
3625 *
3626 * We print a _NOTE about any throttling when closing the
3627 * stream instead of waiting until driver _fini which no one
3628 * would ever see.
3629 *
3630 * Using the same limiting factors as printk_ratelimit()
3631 */
3632 ratelimit_state_init(&dev_priv->perf.spurious_report_rs,
3633 5 * HZ, 10);
3634 /* Since we use a DRM_NOTE for spurious reports it would be
3635 * inconsistent to let __ratelimit() automatically print a
3636 * warning for throttling.
3637 */
3638 ratelimit_set_flags(&dev_priv->perf.spurious_report_rs,
3639 RATELIMIT_MSG_ON_RELEASE);
3640
Robert Bragg19f81df2017-06-13 12:23:03 +01003641 dev_priv->perf.initialized = true;
3642 }
Robert Braggeec688e2016-11-07 19:49:47 +00003643}
3644
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01003645static int destroy_config(int id, void *p, void *data)
3646{
3647 struct drm_i915_private *dev_priv = data;
3648 struct i915_oa_config *oa_config = p;
3649
3650 put_oa_config(dev_priv, oa_config);
3651
3652 return 0;
3653}
3654
Robert Bragg16d98b32016-12-07 21:40:33 +00003655/**
3656 * i915_perf_fini - Counter part to i915_perf_init()
3657 * @dev_priv: i915 device instance
3658 */
Robert Braggeec688e2016-11-07 19:49:47 +00003659void i915_perf_fini(struct drm_i915_private *dev_priv)
3660{
3661 if (!dev_priv->perf.initialized)
3662 return;
3663
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01003664 idr_for_each(&dev_priv->perf.metrics_idr, destroy_config, dev_priv);
3665 idr_destroy(&dev_priv->perf.metrics_idr);
3666
Robert Braggccdf6342016-11-07 19:49:54 +00003667 unregister_sysctl_table(dev_priv->perf.sysctl_header);
3668
Umesh Nerlige Ramappaa37f08a2019-08-06 16:30:02 -07003669 memset(&dev_priv->perf.ops, 0, sizeof(dev_priv->perf.ops));
Robert Bragg19f81df2017-06-13 12:23:03 +01003670
Robert Braggeec688e2016-11-07 19:49:47 +00003671 dev_priv->perf.initialized = false;
3672}