/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *   Robert Bragg <robert@sixbynine.org>
 */


/**
 * DOC: i915 Perf Overview
 *
 * Gen graphics supports a large number of performance counters that can help
 * driver and application developers understand and optimize their use of the
 * GPU.
 *
 * This i915 perf interface enables userspace to configure and open a file
 * descriptor representing a stream of GPU metrics which can then be read() as
 * a stream of sample records.
 *
 * The interface is particularly suited to exposing buffered metrics that are
 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
 *
 * Streams representing a single context are accessible to applications with a
 * corresponding drm file descriptor, such that OpenGL can use the interface
 * without special privileges. Access to system-wide metrics requires root
 * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
 * sysctl option.
 *
 */

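/*
 * As a rough illustration of the flow described above (userspace code, not
 * part of this driver): a client might open an OA stream via the uapi in
 * include/uapi/drm/i915_drm.h along the following lines, where drm_fd and
 * metrics_set_id are assumed to have been obtained elsewhere (the DRM render
 * node and the metric set's sysfs ID respectively), error handling is elided
 * and the exponent of 16 is an arbitrary choice:
 *
 *	u64 properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, true,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = 4,
 *		.properties_ptr = (u64)(uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 * The returned fd is then read() for a stream of sample records.
 */
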
/**
 * DOC: i915 Perf History and Comparison with Core Perf
 *
 * The interface was initially inspired by the core Perf infrastructure but
 * some notable differences are:
 *
 * i915 perf file descriptors represent a "stream" instead of an "event"; where
 * a perf event primarily corresponds to a single 64bit value, while a stream
 * might sample sets of tightly-coupled counters, depending on the
 * configuration. For example the Gen OA unit isn't designed to support
 * orthogonal configurations of individual counters; it's configured for a set
 * of related counters. Samples for an i915 perf stream capturing OA metrics
 * will include a set of counter values packed in a compact HW specific format.
 * The OA unit supports a number of different packing formats which can be
 * selected by the user opening the stream. Perf has support for grouping
 * events, but each event in the group is configured, validated and
 * authenticated individually with separate system calls.
 *
 * i915 perf stream configurations are provided as an array of u64 (key, value)
 * pairs, instead of a fixed struct with multiple miscellaneous config members,
 * interleaved with event-type specific members.
 *
 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
 * The supported metrics are being written to memory by the GPU unsynchronized
 * with the CPU, using HW specific packing formats for counter sets. Sometimes
 * the constraints on HW configuration require reports to be filtered before it
 * would be acceptable to expose them to unprivileged applications - to hide
 * the metrics of other processes/contexts. For these use cases a read() based
 * interface is a good fit, and provides an opportunity to filter data as it
 * gets copied from the GPU mapped buffers to userspace buffers.
 *
 *
 * Issues hit with first prototype based on Core Perf
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The first prototype of this driver was based on the core perf
 * infrastructure, and while we did make that mostly work, with some changes to
 * perf, we found we were breaking or working around too many assumptions baked
 * into perf's currently cpu-centric design.
 *
 * In the end we didn't see a clear benefit to making perf's implementation and
 * interface more complex by changing design assumptions while we knew we still
 * wouldn't be able to use any existing perf based userspace tools.
 *
 * Also considering the Gen specific nature of the Observability hardware and
 * how userspace will sometimes need to combine i915 perf OA metrics with
 * side-band OA data captured via MI_REPORT_PERF_COUNT commands, we're
 * expecting the interface to be used by a platform specific userspace such as
 * OpenGL or tools. This is to say, we aren't inherently missing out on having
 * a standard vendor/architecture agnostic interface by not using perf.
 *
 *
 * For posterity, in case we might re-visit trying to adapt core perf to be
 * better suited to exposing i915 metrics these were the main pain points we
 * hit:
 *
 * - The perf based OA PMU driver broke some significant design assumptions:
 *
 *   Existing perf pmus are used for profiling work on a cpu and we were
 *   introducing the idea of _IS_DEVICE pmus with different security
 *   implications, the need to fake cpu-related data (such as user/kernel
 *   registers) to fit with perf's current design, and adding _DEVICE records
 *   as a way to forward device-specific status records.
 *
 *   The OA unit writes reports of counters into a circular buffer, without
 *   involvement from the CPU, making our PMU driver the first of its kind.
 *
 *   Given the way we were periodically forwarding data from the GPU-mapped OA
 *   buffer to perf's buffer, those bursts of sample writes looked to perf like
 *   we were sampling too fast, so we had to subvert its throttling checks.
 *
 *   Perf supports groups of counters and allows those to be read via
 *   transactions internally but transactions currently seem designed to be
 *   explicitly initiated from the cpu (say in response to a userspace read())
 *   and while we could pull a report out of the OA buffer we can't
 *   trigger a report from the cpu on demand.
 *
 *   Related to being report based: the OA counters are configured in HW as a
 *   set while perf generally expects counter configurations to be orthogonal.
 *   Although counters can be associated with a group leader as they are
 *   opened, there's no clear precedent for being able to provide group-wide
 *   configuration attributes (for example we want to let userspace choose the
 *   OA unit report format used to capture all counters in a set, or specify a
 *   GPU context to filter metrics on). We avoided using perf's grouping
 *   feature and forwarded OA reports to userspace via perf's 'raw' sample
 *   field. This suited our userspace well considering how coupled the counters
 *   are when dealing with normalization. It would be inconvenient to split
 *   counters up into separate events, only to require userspace to recombine
 *   them. For Mesa it's also convenient to be forwarded raw, periodic reports
 *   for combining with the side-band raw reports it captures using
 *   MI_REPORT_PERF_COUNT commands.
 *
 * - As a side note on perf's grouping feature: there was also some concern
 *   that using PERF_FORMAT_GROUP as a way to pack together counter values
 *   would quite drastically inflate our sample sizes, which would likely
 *   lower the effective sampling resolutions we could use when the available
 *   memory bandwidth is limited.
 *
 *   With the OA unit's report formats, counters are packed together as 32
 *   or 40bit values, with the largest report size being 256 bytes.
 *
 *   PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
 *   documented ordering to the values, implying PERF_FORMAT_ID must also be
 *   used to add a 64bit ID before each value; giving 16 bytes per counter.
 *
 *   Related to counter orthogonality: we can't time share the OA unit, while
 *   event scheduling is a central design idea within perf for allowing
 *   userspace to open + enable more events than can be configured in HW at any
 *   one time. The OA unit is not designed to allow re-configuration while in
 *   use. We can't reconfigure the OA unit without losing internal OA unit
 *   state which we can't access explicitly to save and restore. Reconfiguring
 *   the OA unit is also relatively slow, involving ~100 register writes. From
 *   userspace Mesa also depends on a stable OA configuration when emitting
 *   MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
 *   disabled while there are outstanding MI_RPC commands lest we hang the
 *   command streamer.
 *
 *   The contents of sample records aren't extensible by device drivers (i.e.
 *   the sample_type bits). As an example, Sourab Gupta had been looking to
 *   attach GPU timestamps to our OA samples. We were shoehorning OA reports
 *   into sample records by using the 'raw' field, but it's tricky to pack more
 *   than one thing into this field because events/core.c currently only lets a
 *   pmu give a single raw data pointer plus len which will be copied into the
 *   ring buffer. To include more than the OA report we'd have to copy the
 *   report into an intermediate larger buffer. I'd been considering allowing a
 *   vector of data+len values to be specified for copying the raw data, but
 *   it felt like a kludge to be using the raw field for this purpose.
 *
 * - It felt like our perf based PMU was making some technical compromises
 *   just for the sake of using perf:
 *
 *   perf_event_open() requires events to either relate to a pid or a specific
 *   cpu core, while our device pmu related to neither. Events opened with a
 *   pid will be automatically enabled/disabled according to the scheduling of
 *   that process - so not appropriate for us. When an event is related to a
 *   cpu id, perf ensures pmu methods will be invoked via an inter-processor
 *   interrupt on that core. To avoid invasive changes our userspace opened OA
 *   perf events for a specific cpu. This was workable but it meant the
 *   majority of the OA driver ran in atomic context, including all OA report
 *   forwarding, which wasn't really necessary in our case and seemed to make
 *   our locking requirements somewhat complex as we handled the interaction
 *   with the rest of the i915 driver.
 */

#include <linux/anon_inodes.h>
#include <linux/sizes.h>
#include <linux/uuid.h>

#include "gt/intel_lrc_reg.h"

#include "i915_drv.h"
#include "i915_oa_hsw.h"
#include "i915_oa_bdw.h"
#include "i915_oa_chv.h"
#include "i915_oa_sklgt2.h"
#include "i915_oa_sklgt3.h"
#include "i915_oa_sklgt4.h"
#include "i915_oa_bxt.h"
#include "i915_oa_kblgt2.h"
#include "i915_oa_kblgt3.h"
#include "i915_oa_glk.h"
#include "i915_oa_cflgt2.h"
#include "i915_oa_cflgt3.h"
#include "i915_oa_cnl.h"
#include "i915_oa_icl.h"

/* HW requires this to be a power of two, between 128k and 16M, though the
 * driver is currently designed assuming the largest 16M size is used, such
 * that the overflow cases are unlikely in normal operation.
 */
#define OA_BUFFER_SIZE		SZ_16M

#define OA_TAKEN(tail, head)	((tail - head) & (OA_BUFFER_SIZE - 1))
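
/*
 * A worked example of the wraparound arithmetic (values arbitrary): with the
 * 16M buffer the mask is 0xffffff, so if head = 0xffff80 and the tail has
 * wrapped around to 0x100, then OA_TAKEN(0x100, 0xffff80) =
 * (0x100 - 0xffff80) & 0xffffff = 0x180, i.e. 384 bytes are available.
 */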

/**
 * DOC: OA Tail Pointer Race
 *
 * There's a HW race condition between OA unit tail pointer register updates and
 * writes to memory whereby the tail pointer can sometimes get ahead of what's
 * been written out to the OA buffer so far (in terms of what's visible to the
 * CPU).
 *
 * Although this can be observed explicitly while copying reports to userspace
 * by checking for a zeroed report-id field in tail reports, we want to account
 * for this earlier, as part of oa_buffer_check_unlocked() to avoid lots of
 * redundant read() attempts.
 *
 * In effect we define a tail pointer for reading that lags the real tail
 * pointer by at least %OA_TAIL_MARGIN_NSEC nanoseconds, which gives enough
 * time for the corresponding reports to become visible to the CPU.
 *
 * To manage this we actually track two tail pointers:
 * 1) An 'aging' tail with an associated timestamp that is tracked until we
 *    can trust the corresponding data is visible to the CPU; at which point
 *    it is considered 'aged'.
 * 2) An 'aged' tail that can be used for read()ing.
 *
 * The two separate pointers let us decouple read()s from tail pointer aging.
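 *
 * As an illustrative (hypothetical) sequence: a check at time T reads
 * hw_tail = 0x400 and records it as the aging tail, stamped with T, while
 * read()s may only consume up to the aged tail. Once a later check runs at
 * T + 100000ns or more, that pointer is considered aged: aged_tail_idx is
 * flipped so read()s may now consume up to 0x400, and any newer hw_tail
 * observed starts aging in turn.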
 *
 * The tail pointers are checked and updated at a limited rate within a hrtimer
 * callback (the same callback that is used for delivering EPOLLIN events).
 *
 * Initially the tails are marked invalid with %INVALID_TAIL_PTR which
 * indicates that an updated tail pointer is needed.
 *
 * Most of the implementation details for this workaround are in
 * oa_buffer_check_unlocked() and _append_oa_reports().
 *
 * Note for posterity: previously the driver used to define an effective tail
 * pointer that lagged the real pointer by a 'tail margin' measured in bytes
 * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
 * This was flawed considering that the OA unit may also automatically generate
 * non-periodic reports (such as on context switch) or the OA unit may be
 * enabled without any periodic sampling.
 */
#define OA_TAIL_MARGIN_NSEC	100000ULL
#define INVALID_TAIL_PTR	0xffffffff

/* frequency for checking whether the OA unit has written new reports to the
 * circular OA buffer...
 */
#define POLL_FREQUENCY 200
#define POLL_PERIOD (NSEC_PER_SEC / POLL_FREQUENCY)

/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
static int zero;
static int one = 1;
static u32 i915_perf_stream_paranoid = true;

/* The maximum exponent the hardware accepts is 63 (essentially it selects one
 * of the 64bit timestamp bits to trigger reports from) but there's currently
 * no known use case for sampling as infrequently as once per 47 thousand years.
 *
 * Since the timestamps included in OA reports are only 32bits it seems
 * reasonable to limit the OA exponent where it's still possible to account for
 * overflow in OA report timestamps.
 */
#define OA_EXPONENT_MAX 31
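
/*
 * Background, as a sketch (the exact conversion lives with the per-platform
 * timestamp frequency): the sampling period selected by an exponent is
 * roughly (2 << exponent) / timestamp frequency. With Haswell's 12.5MHz
 * timestamp frequency, exponent 0 gives a 160ns period while OA_EXPONENT_MAX
 * (31) gives (2 << 31) / 12500000Hz, roughly 343 seconds.
 */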

#define INVALID_CTX_ID 0xffffffff

/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK           0x3f
#define OAREPORT_REASON_SHIFT          19
#define OAREPORT_REASON_TIMER          (1<<0)
#define OAREPORT_REASON_CTX_SWITCH     (1<<3)
#define OAREPORT_REASON_CLK_RATIO      (1<<5)


/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
 *
 * The highest sampling frequency we can theoretically program the OA unit
 * with is always half the timestamp frequency: e.g. 6.25MHz for Haswell.
 *
 * Initialized just before we register the sysctl parameter.
 */
static int oa_sample_rate_hard_limit;

/* Theoretically we can program the OA unit to sample every 160ns but don't
 * allow that by default unless root...
 *
 * The default threshold of 100000Hz is based on perf's similar
 * kernel.perf_event_max_sample_rate sysctl parameter.
 */
static u32 i915_oa_max_sample_rate = 100000;

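/*
 * A worked example tying the two limits together (numbers assume Haswell's
 * 12.5MHz timestamp frequency): a periodic exponent of 5 samples every
 * (2 << 5) / 12500000Hz, i.e. at ~195kHz, which exceeds the 100000Hz default
 * above and so requires root (or a raised sysctl limit), while exponent 6
 * (~97.6kHz) is the fastest an unprivileged user can request on that
 * platform.
 */
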
/* XXX: beware if future OA HW adds new report formats that the current
 * code assumes all reports have a power-of-two size and ~(size - 1) can
 * be used as a mask to align the OA tail pointer.
 */
static const struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A13]	    = { 0, 64 },
	[I915_OA_FORMAT_A29]	    = { 1, 128 },
	[I915_OA_FORMAT_A13_B8_C8]  = { 2, 128 },
	/* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
	[I915_OA_FORMAT_B4_C8]	    = { 4, 64 },
	[I915_OA_FORMAT_A45_B8_C8]  = { 5, 256 },
	[I915_OA_FORMAT_B4_C8_A16]  = { 6, 128 },
	[I915_OA_FORMAT_C4_B8]	    = { 7, 64 },
};

static const struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A12]		    = { 0, 64 },
	[I915_OA_FORMAT_A12_B8_C8]	    = { 2, 128 },
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
	[I915_OA_FORMAT_C4_B8]		    = { 7, 64 },
};

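/*
 * A quick sanity check that can be made against these tables (illustrative):
 * each entry maps a uapi format enum to { HW format field, report size in
 * bytes } and every size divides OA_BUFFER_SIZE evenly, e.g. the 256 byte
 * I915_OA_FORMAT_A45_B8_C8 reports fit exactly 65536 times into the 16M
 * buffer, so reports never straddle the wrap point.
 */
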
#define SAMPLE_OA_REPORT      (1<<0)

/**
 * struct perf_open_properties - for validated properties given to open a stream
 * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
 * @single_context: Whether a single or all gpu contexts should be monitored
 * @ctx_handle: A gem ctx handle for use with @single_context
 * @metrics_set: An ID for an OA unit metric set advertised via sysfs
 * @oa_format: An OA unit HW report format
 * @oa_periodic: Whether to enable periodic OA unit sampling
 * @oa_period_exponent: The OA unit sampling period is derived from this
 *
 * As read_properties_unlocked() enumerates and validates the properties given
 * to open a stream of metrics the configuration is built up in the structure
 * which starts out zero initialized.
 */
struct perf_open_properties {
	u32 sample_flags;

	u64 single_context:1;
	u64 ctx_handle;

	/* OA sampling state */
	int metrics_set;
	int oa_format;
	bool oa_periodic;
	int oa_period_exponent;
};

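/*
 * For instance (hypothetical values, mirroring the uapi example near the top
 * of this file): opening a periodic OA stream with metrics set 1, the
 * A32u40_A4u32_B8_C8 format and exponent 16 would build up:
 *
 *	struct perf_open_properties props = {
 *		.sample_flags = SAMPLE_OA_REPORT,
 *		.metrics_set = 1,
 *		.oa_format = I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		.oa_periodic = true,
 *		.oa_period_exponent = 16,
 *	};
 */
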
static void free_oa_config(struct drm_i915_private *dev_priv,
			   struct i915_oa_config *oa_config)
{
	/* The register lists may hold ERR_PTR() values from a failed
	 * allocation, so only kfree() the lists that are real allocations.
	 */
	if (!IS_ERR(oa_config->flex_regs))
		kfree(oa_config->flex_regs);
	if (!IS_ERR(oa_config->b_counter_regs))
		kfree(oa_config->b_counter_regs);
	if (!IS_ERR(oa_config->mux_regs))
		kfree(oa_config->mux_regs);
	kfree(oa_config);
}

static void put_oa_config(struct drm_i915_private *dev_priv,
			  struct i915_oa_config *oa_config)
{
	if (!atomic_dec_and_test(&oa_config->ref_count))
		return;

	free_oa_config(dev_priv, oa_config);
}

static int get_oa_config(struct drm_i915_private *dev_priv,
			 int metrics_set,
			 struct i915_oa_config **out_config)
{
	int ret;

	if (metrics_set == 1) {
		*out_config = &dev_priv->perf.oa.test_config;
		atomic_inc(&dev_priv->perf.oa.test_config.ref_count);
		return 0;
	}

	ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
	if (ret)
		return ret;

	*out_config = idr_find(&dev_priv->perf.metrics_idr, metrics_set);
	if (!*out_config)
		ret = -EINVAL;
	else
		atomic_inc(&(*out_config)->ref_count);

	mutex_unlock(&dev_priv->perf.metrics_lock);

	return ret;
}

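/*
 * A minimal usage sketch for the refcounting trio above (hypothetical
 * caller): a successful get_oa_config() lookup must be balanced by
 * put_oa_config() once the config is no longer needed:
 *
 *	struct i915_oa_config *oa_config;
 *	int err = get_oa_config(dev_priv, metrics_set, &oa_config);
 *
 *	if (!err) {
 *		... program the OA unit from oa_config ...
 *		put_oa_config(dev_priv, oa_config);
 *	}
 */
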
static u32 gen8_oa_hw_tail_read(struct drm_i915_private *dev_priv)
{
	return I915_READ(GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}

static u32 gen7_oa_hw_tail_read(struct drm_i915_private *dev_priv)
{
	u32 oastatus1 = I915_READ(GEN7_OASTATUS1);

	return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}

/**
 * oa_buffer_check_unlocked - check for data and update tail ptr state
 * @dev_priv: i915 device instance
 *
 * This is either called via fops (for blocking reads in user ctx) or the poll
 * check hrtimer (atomic ctx) to check the OA buffer tail pointer and determine
 * if there is data available for userspace to read.
 *
 * This function is central to providing a workaround for the OA unit tail
 * pointer having a race with respect to what data is visible to the CPU.
 * It is responsible for reading tail pointers from the hardware and giving
 * the pointers time to 'age' before they are made available for reading.
 * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
 *
 * Besides returning true when there is data available to read() this function
 * also has the side effect of updating the oa_buffer.tails[], .aging_timestamp
 * and .aged_tail_idx state used for reading.
 *
 * Note: It's safe to read OA config state here unlocked, assuming that this is
 * only called while the stream is enabled, while the global OA configuration
 * can't be modified.
 *
 * Returns: %true if the OA buffer contains data, else %false
 */
static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
{
	int report_size = dev_priv->perf.oa.oa_buffer.format_size;
	unsigned long flags;
	unsigned int aged_idx;
	u32 head, hw_tail, aged_tail, aging_tail;
	u64 now;

	/* We have to consider the (unlikely) possibility that read() errors
	 * could result in an OA buffer reset which might reset the head,
	 * tails[] and aged_tail state.
	 */
	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/* NB: The head we observe here might effectively be a little out of
	 * date (between head and tails[aged_idx].offset) if there is currently
	 * a read() in progress.
	 */
	head = dev_priv->perf.oa.oa_buffer.head;

	aged_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
	aged_tail = dev_priv->perf.oa.oa_buffer.tails[aged_idx].offset;
	aging_tail = dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset;

	hw_tail = dev_priv->perf.oa.ops.oa_hw_tail_read(dev_priv);

	/* The tail pointer increases in 64 byte increments,
	 * not in report_size steps...
	 */
	hw_tail &= ~(report_size - 1);

	now = ktime_get_mono_fast_ns();

	/* Update the aged tail
	 *
	 * Flip the tail pointer available for read()s once the aging tail is
	 * old enough to trust that the corresponding data will be visible to
	 * the CPU...
	 *
	 * Do this before updating the aging pointer in case we may be able to
	 * immediately start aging a new pointer too (if new data has become
	 * available) without needing to wait for a later hrtimer callback.
	 */
	if (aging_tail != INVALID_TAIL_PTR &&
	    ((now - dev_priv->perf.oa.oa_buffer.aging_timestamp) >
	     OA_TAIL_MARGIN_NSEC)) {

		aged_idx ^= 1;
		dev_priv->perf.oa.oa_buffer.aged_tail_idx = aged_idx;

		aged_tail = aging_tail;

		/* Mark that we need a new pointer to start aging... */
		dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset = INVALID_TAIL_PTR;
		aging_tail = INVALID_TAIL_PTR;
	}

	/* Update the aging tail
	 *
	 * We throttle aging tail updates until we have a new tail that
	 * represents >= one report more data than is already available for
	 * reading. This ensures there will be enough data for a successful
	 * read once this new pointer has aged and ensures we will give the new
	 * pointer time to age.
	 */
	if (aging_tail == INVALID_TAIL_PTR &&
	    (aged_tail == INVALID_TAIL_PTR ||
	     OA_TAKEN(hw_tail, aged_tail) >= report_size)) {
		struct i915_vma *vma = dev_priv->perf.oa.oa_buffer.vma;
		u32 gtt_offset = i915_ggtt_offset(vma);

		/* Be paranoid and do a bounds check on the pointer read back
		 * from hardware, just in case some spurious hardware condition
		 * could put the tail out of bounds...
		 */
		if (hw_tail >= gtt_offset &&
		    hw_tail < (gtt_offset + OA_BUFFER_SIZE)) {
			dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset =
				aging_tail = hw_tail;
			dev_priv->perf.oa.oa_buffer.aging_timestamp = now;
		} else {
			DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %u\n",
				  hw_tail);
		}
	}

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	return aged_tail == INVALID_TAIL_PTR ?
		false : OA_TAKEN(aged_tail, head) >= report_size;
}

/**
 * append_oa_status - Appends a status record to a userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @type: The kind of status to report to userspace
 *
 * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
 * into the userspace read() buffer.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_status(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    enum drm_i915_perf_record_type type)
{
	struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };

	if ((count - *offset) < header.size)
		return -ENOSPC;

	if (copy_to_user(buf + *offset, &header, sizeof(header)))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}

/**
 * append_oa_sample - Copies single OA report into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @report: A single OA report to (optionally) include as part of the sample
 *
 * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
 * properties when opening a stream, tracked as `stream->sample_flags`. This
 * function copies the requested components of a single sample to the given
 * read() @buf.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_sample(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    const u8 *report)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	int report_size = dev_priv->perf.oa.oa_buffer.format_size;
	struct drm_i915_perf_record_header header;
	u32 sample_flags = stream->sample_flags;

	header.type = DRM_I915_PERF_RECORD_SAMPLE;
	header.pad = 0;
	header.size = stream->sample_size;

	if ((count - *offset) < header.size)
		return -ENOSPC;

	buf += *offset;
	if (copy_to_user(buf, &header, sizeof(header)))
		return -EFAULT;
	buf += sizeof(header);

	if (sample_flags & SAMPLE_OA_REPORT) {
		if (copy_to_user(buf, report, report_size))
			return -EFAULT;
	}

	(*offset) += header.size;

	return 0;
}

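/*
 * On the userspace side (an illustrative sketch, not driver code), the
 * records emitted by append_oa_status()/append_oa_sample() can be walked
 * using drm_i915_perf_record_header.size, where buf/len come from a prior
 * read() of the stream fd and the two handle_*() helpers are hypothetical:
 *
 *	size_t pos = 0;
 *
 *	while (pos + sizeof(struct drm_i915_perf_record_header) <= len) {
 *		const struct drm_i915_perf_record_header *hdr =
 *			(const void *)(buf + pos);
 *
 *		if (hdr->size == 0)
 *			break;
 *
 *		switch (hdr->type) {
 *		case DRM_I915_PERF_RECORD_SAMPLE:
 *			handle_oa_report((const u8 *)(hdr + 1));
 *			break;
 *		case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
 *		case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
 *			handle_loss(hdr->type);
 *			break;
 *		}
 *		pos += hdr->size;
 *	}
 *
 * With only SAMPLE_OA requested, the raw OA report directly follows the
 * header in each sample record.
 */
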
/**
 * gen8_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen8_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	int report_size = dev_priv->perf.oa.oa_buffer.format_size;
	u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	unsigned int aged_tail_idx;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (WARN_ON(!stream->enabled))
		return -EIO;

	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	head = dev_priv->perf.oa.oa_buffer.head;
	aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
	tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset;

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/*
	 * An invalid tail pointer here means we're still waiting for the poll
	 * hrtimer callback to give us a pointer
	 */
	if (tail == INVALID_TAIL_PTR)
		return -EAGAIN;

	/*
	 * NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/*
	 * An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
		      tail > OA_BUFFER_SIZE || tail % report_size,
		      "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
		      head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;
		u32 ctx_id;
		u32 reason;

		/*
		 * All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
			DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/*
		 * The reason field includes flags identifying what
		 * triggered this specific report (mostly timer
		 * triggered or e.g. due to a context switch).
		 *
		 * This field is never expected to be zero so we can
		 * check that the report isn't invalid before copying
		 * it to userspace...
		 */
		reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
			  OAREPORT_REASON_MASK);
		if (reason == 0) {
			if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		ctx_id = report32[2] & dev_priv->perf.oa.specific_ctx_id_mask;

		/*
		 * Squash whatever is in the CTX_ID field if it's marked as
		 * invalid to be sure we avoid false-positive, single-context
		 * filtering below...
		 *
		 * Note that we don't clear the valid_ctx_bit so userspace can
		 * understand that the ID has been squashed by the kernel.
		 */
		if (!(report32[0] & dev_priv->perf.oa.gen8_valid_ctx_bit))
			ctx_id = report32[2] = INVALID_CTX_ID;

		/*
		 * NB: For Gen 8 the OA unit no longer supports clock gating
		 * off for a specific context and the kernel can't securely
		 * stop the counters from updating as system-wide / global
		 * values.
		 *
		 * Automatic reports now include a context ID so reports can be
		 * filtered on the cpu but it's not worth trying to
		 * automatically subtract/hide counter progress for other
		 * contexts while filtering since we can't stop userspace
		 * issuing MI_REPORT_PERF_COUNT commands which would still
		 * provide a side-band view of the real values.
		 *
		 * To allow userspace (such as Mesa/GL_INTEL_performance_query)
		 * to normalize counters for a single filtered context then it
		 * needs to be forwarded bookend context-switch reports so that
		 * it can track switches in between MI_REPORT_PERF_COUNT
		 * commands and can itself subtract/ignore the progress of
		 * counters associated with other contexts. Note that the
		 * hardware automatically triggers reports when switching to a
		 * new context which are tagged with the ID of the newly active
		 * context. To avoid the complexity (and likely fragility) of
		 * reading ahead while parsing reports to try and minimize
		 * forwarding redundant context switch reports (i.e. between
		 * other, unrelated contexts) we simply elect to forward them
		 * all.
		 *
		 * We don't rely solely on the reason field to identify context
		 * switches since it's not uncommon for periodic samples to
		 * identify a switch before any 'context switch' report.
		 */
		if (!dev_priv->perf.oa.exclusive_stream->ctx ||
		    dev_priv->perf.oa.specific_ctx_id == ctx_id ||
		    (dev_priv->perf.oa.oa_buffer.last_ctx_id ==
		     dev_priv->perf.oa.specific_ctx_id) ||
		    reason & OAREPORT_REASON_CTX_SWITCH) {

			/*
			 * While filtering for a single context we avoid
			 * leaking the IDs of other contexts.
			 */
			if (dev_priv->perf.oa.exclusive_stream->ctx &&
			    dev_priv->perf.oa.specific_ctx_id != ctx_id) {
				report32[2] = INVALID_CTX_ID;
			}

			ret = append_oa_sample(stream, buf, count, offset,
					       report);
			if (ret)
				break;

			dev_priv->perf.oa.oa_buffer.last_ctx_id = ctx_id;
		}

		/*
		 * The above reason field sanity check is based on
		 * the assumption that the OA buffer is initially
		 * zeroed and we reset the field after copying so the
		 * check is still meaningful once old reports start
		 * being overwritten.
		 */
		report32[0] = 0;
	}

	if (start_offset != *offset) {
		spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

		/*
		 * We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;

		I915_WRITE(GEN8_OAHEADPTR, head & GEN8_OAHEADPTR_MASK);
		dev_priv->perf.oa.oa_buffer.head = head;

		spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen8_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks OA unit status registers and if necessary appends corresponding
 * status records for userspace (such as for a buffer full condition) and then
 * initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * NB: some data may be successfully copied to the userspace buffer
 * even if an error is returned, and this is reflected in the
 * updated @offset.
 *
 * Returns: zero on success or a negative error code
 */
static int gen8_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	u32 oastatus;
	int ret;

	if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
		return -EIO;

	oastatus = I915_READ(GEN8_OASTATUS);

	/*
	 * We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * Although theoretically we could handle this more gracefully
	 * sometimes, some Gens don't correctly suppress certain
	 * automatically triggered reports in this condition and so we
	 * have to assume that old reports are now being trampled
	 * over.
	 *
	 * Considering how we don't currently give userspace control
	 * over the OA buffer size and always configure a large 16MB
	 * buffer, a buffer overflow likely indicates that something
	 * has gone quite badly wrong.
	 */
	if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  dev_priv->perf.oa.period_exponent);

		dev_priv->perf.oa.ops.oa_disable(stream);
		dev_priv->perf.oa.ops.oa_enable(stream);

		/*
		 * Note: .oa_enable() is expected to re-init the oabuffer and
		 * reset GEN8_OASTATUS for us
		 */
		oastatus = I915_READ(GEN8_OASTATUS);
	}

	if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		I915_WRITE(GEN8_OASTATUS,
			   oastatus & ~GEN8_OASTATUS_REPORT_LOST);
	}

	return gen8_append_oa_reports(stream, buf, count, offset);
}

/**
 * gen7_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen7_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	int report_size = dev_priv->perf.oa.oa_buffer.format_size;
	u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	unsigned int aged_tail_idx;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (WARN_ON(!stream->enabled))
		return -EIO;

	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	head = dev_priv->perf.oa.oa_buffer.head;
	aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
	tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset;

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/* An invalid tail pointer here means we're still waiting for the poll
	 * hrtimer callback to give us a pointer
	 */
	if (tail == INVALID_TAIL_PTR)
		return -EAGAIN;

	/* NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/* An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
		      tail > OA_BUFFER_SIZE || tail % report_size,
		      "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
		      head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;

		/* All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
			DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/* The report-ID field for periodic samples includes
		 * some undocumented flags related to what triggered
		 * the report and is never expected to be zero so we
		 * can check that the report isn't invalid before
		 * copying it to userspace...
		 */
		if (report32[0] == 0) {
			if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		ret = append_oa_sample(stream, buf, count, offset, report);
		if (ret)
			break;

		/* The above report-id field sanity check is based on
		 * the assumption that the OA buffer is initially
		 * zeroed and we reset the field after copying so the
		 * check is still meaningful once old reports start
		 * being overwritten.
		 */
		report32[0] = 0;
	}

	if (start_offset != *offset) {
		spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

		/* We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;

		I915_WRITE(GEN7_OASTATUS2,
			   ((head & GEN7_OASTATUS2_HEAD_MASK) |
			    GEN7_OASTATUS2_MEM_SELECT_GGTT));
		dev_priv->perf.oa.oa_buffer.head = head;

		spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen7_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks Gen 7 specific OA unit status registers and if necessary appends
 * corresponding status records for userspace (such as for a buffer full
 * condition) and then initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int gen7_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	u32 oastatus1;
	int ret;

	if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
		return -EIO;

	oastatus1 = I915_READ(GEN7_OASTATUS1);

	/* XXX: On Haswell we don't have a safe way to clear oastatus1
	 * bits while the OA unit is enabled (while the tail pointer
	 * may be updated asynchronously) so we ignore status bits
	 * that have already been reported to userspace.
	 */
	oastatus1 &= ~dev_priv->perf.oa.gen7_latched_oastatus1;

	/* We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * - The status can be interpreted to mean that the buffer is
	 *   currently full (with a higher precedence than OA_TAKEN()
	 *   which will start to report a near-empty buffer after an
	 *   overflow) but it's awkward that we can't clear the status
	 *   on Haswell, so without a reset we won't be able to catch
	 *   the state again.
	 *
	 * - Since it also implies the HW has started overwriting old
	 *   reports it may also affect our sanity checks for invalid
	 *   reports when copying to userspace that assume new reports
	 *   are being written to cleared memory.
	 *
	 * - In the future we may want to introduce a flight recorder
	 *   mode where the driver will automatically maintain a safe
	 *   guard band between head/tail, avoiding this overflow
	 *   condition, but we avoid the added driver complexity for
	 *   now.
	 */
	if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  dev_priv->perf.oa.period_exponent);

		dev_priv->perf.oa.ops.oa_disable(stream);
		dev_priv->perf.oa.ops.oa_enable(stream);

		oastatus1 = I915_READ(GEN7_OASTATUS1);
	}

	if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		dev_priv->perf.oa.gen7_latched_oastatus1 |=
			GEN7_OASTATUS1_REPORT_LOST;
	}

	return gen7_append_oa_reports(stream, buf, count, offset);
}

/**
 * i915_oa_wait_unlocked - handles blocking IO until OA data available
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Called when userspace tries to read() from a blocking stream FD opened
 * for OA metrics. It waits until the hrtimer callback finds a non-empty
 * OA buffer and wakes us.
 *
 * Note: it's acceptable to have this return with some false positives
 * since any subsequent read handling will return -EAGAIN if there isn't
 * really data ready for userspace yet.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	/* We would wait indefinitely if periodic sampling is not enabled */
	if (!dev_priv->perf.oa.periodic)
		return -EIO;

	return wait_event_interruptible(dev_priv->perf.oa.poll_wq,
					oa_buffer_check_unlocked(dev_priv));
}

/**
 * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
 * @stream: An i915-perf stream opened for OA metrics
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream opened for OA metrics,
 * this starts a poll_wait with the wait queue that our hrtimer callback wakes
 * when it sees data ready to read in the circular OA buffer.
 */
static void i915_oa_poll_wait(struct i915_perf_stream *stream,
			      struct file *file,
			      poll_table *wait)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	poll_wait(file, &dev_priv->perf.oa.poll_wq, wait);
}

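/*
 * Userspace counterpart to the wait/poll hooks above (a sketch, not driver
 * code): with a stream fd from DRM_IOCTL_I915_PERF_OPEN, a client can block
 * until samples are available, where buf is the client's own read buffer:
 *
 *	struct pollfd pfd = { .fd = stream_fd, .events = POLLIN };
 *
 *	while (poll(&pfd, 1, -1) >= 0) {
 *		ssize_t n = read(stream_fd, buf, sizeof(buf));
 *
 *		if (n < 0 && errno != EAGAIN)
 *			break;
 *		... walk the drm_i915_perf_record_header records in buf ...
 *	}
 */
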
/**
 * i915_oa_read - just calls through to &i915_oa_ops->read
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	return dev_priv->perf.oa.ops.read(stream, buf, count, offset);
}

static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
					    struct i915_gem_context *ctx)
{
	struct intel_engine_cs *engine = i915->engine[RCS0];
	struct intel_context *ce;
	int err;

	ce = intel_context_instance(ctx, engine);
	if (IS_ERR(ce))
		return ce;

	err = i915_mutex_lock_interruptible(&i915->drm);
	if (err) {
		intel_context_put(ce);
		return ERR_PTR(err);
	}

	/*
	 * As the ID is the gtt offset of the context's vma we
	 * pin the vma to ensure the ID remains fixed.
	 *
	 * NB: implied RCS engine...
	 */
	err = intel_context_pin(ce);
	mutex_unlock(&i915->drm.struct_mutex);
	intel_context_put(ce);
	if (err)
		return ERR_PTR(err);

	i915->perf.oa.pinned_ctx = ce;

	return ce;
}

Robert Bragg16d98b32016-12-07 21:40:33 +00001237/**
1238 * oa_get_render_ctx_id - determine and hold ctx hw id
1239 * @stream: An i915-perf stream opened for OA metrics
1240 *
1241 * Determine the render context hw id, and ensure it remains fixed for the
Robert Braggd7965152016-11-07 19:49:52 +00001242 * lifetime of the stream. This ensures that we don't have to worry about
1243 * updating the context ID in OACONTROL on the fly.
Robert Bragg16d98b32016-12-07 21:40:33 +00001244 *
1245 * Returns: zero on success or a negative error code
Robert Braggd7965152016-11-07 19:49:52 +00001246 */
1247static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
1248{
Lionel Landwerlin61d56762018-06-02 12:29:46 +01001249 struct drm_i915_private *i915 = stream->dev_priv;
1250 struct intel_context *ce;
Robert Braggd7965152016-11-07 19:49:52 +00001251
Lionel Landwerlin61d56762018-06-02 12:29:46 +01001252 ce = oa_pin_context(i915, stream->ctx);
1253 if (IS_ERR(ce))
1254 return PTR_ERR(ce);
Robert Braggd7965152016-11-07 19:49:52 +00001255
Lionel Landwerlin61d56762018-06-02 12:29:46 +01001256 switch (INTEL_GEN(i915)) {
1257 case 7: {
Robert Bragg19f81df2017-06-13 12:23:03 +01001258 /*
Lionel Landwerlin61d56762018-06-02 12:29:46 +01001259 * On Haswell we don't do any post processing of the reports
1260 * and don't need to use the mask.
Robert Bragg19f81df2017-06-13 12:23:03 +01001261 */
Lionel Landwerlin61d56762018-06-02 12:29:46 +01001262 i915->perf.oa.specific_ctx_id = i915_ggtt_offset(ce->state);
1263 i915->perf.oa.specific_ctx_id_mask = 0;
1264 break;
Robert Bragg19f81df2017-06-13 12:23:03 +01001265 }
Robert Braggd7965152016-11-07 19:49:52 +00001266
Lionel Landwerlin61d56762018-06-02 12:29:46 +01001267 case 8:
1268 case 9:
1269 case 10:
1270 if (USES_GUC_SUBMISSION(i915)) {
1271 /*
1272 * When using GuC, the context descriptor we write in
1273 * i915 is read by GuC and rewritten before it's
1274 * actually written into the hardware. The LRCA is
1275 * what is put into the context id field of the
1276 * context descriptor by GuC. Because it's aligned to
1277 * a page, the lower 12 bits are always 0 and
1278 * dropped by GuC. They won't be part of the context
1279 * ID in the OA reports, so squash those lower bits.
1280 */
1281 i915->perf.oa.specific_ctx_id =
1282 lower_32_bits(ce->lrc_desc) >> 12;
1283
1284 /*
1285 * GuC uses the top bit to signal proxy submission, so
1286 * ignore that bit.
1287 */
1288 i915->perf.oa.specific_ctx_id_mask =
1289 (1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
1290 } else {
Lionel Landwerlin61d56762018-06-02 12:29:46 +01001291 i915->perf.oa.specific_ctx_id_mask =
1292 (1U << GEN8_CTX_ID_WIDTH) - 1;
Michel Thierry9904b1562018-06-04 16:32:49 -07001293 i915->perf.oa.specific_ctx_id =
1294 upper_32_bits(ce->lrc_desc);
1295 i915->perf.oa.specific_ctx_id &=
1296 i915->perf.oa.specific_ctx_id_mask;
Lionel Landwerlin61d56762018-06-02 12:29:46 +01001297 }
1298 break;
1299
1300 case 11: {
Lionel Landwerlin61d56762018-06-02 12:29:46 +01001301 i915->perf.oa.specific_ctx_id_mask =
1302 ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32) |
1303 ((1U << GEN11_ENGINE_INSTANCE_WIDTH) - 1) << (GEN11_ENGINE_INSTANCE_SHIFT - 32) |
1304 ((1 << GEN11_ENGINE_CLASS_WIDTH) - 1) << (GEN11_ENGINE_CLASS_SHIFT - 32);
Michel Thierry2b9a8202018-06-04 16:32:50 -07001305 i915->perf.oa.specific_ctx_id = upper_32_bits(ce->lrc_desc);
1306 i915->perf.oa.specific_ctx_id &=
1307 i915->perf.oa.specific_ctx_id_mask;
Lionel Landwerlin61d56762018-06-02 12:29:46 +01001308 break;
1309 }
1310
1311 default:
1312 MISSING_CASE(INTEL_GEN(i915));
1313 }
1314
1315 DRM_DEBUG_DRIVER("filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
1316 i915->perf.oa.specific_ctx_id,
1317 i915->perf.oa.specific_ctx_id_mask);
1318
Chris Wilson266a2402017-05-04 10:33:08 +01001319 return 0;
Robert Braggd7965152016-11-07 19:49:52 +00001320}
1321
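/*
 * Worked example for the GuC path above (the LRCA value is invented for
 * illustration): with lower_32_bits(ce->lrc_desc) == 0x0badc000, the
 * page-aligned low 12 bits carry no information and are dropped:
 *
 *   specific_ctx_id = 0x0badc000 >> 12 = 0x000badc
 *
 * The proxy submission bit is then excluded via specific_ctx_id_mask
 * when matching the ctx ID field of OA reports.
 */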
Robert Bragg16d98b32016-12-07 21:40:33 +00001322/**
1323 * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id that releases the hold
1324 * @stream: An i915-perf stream opened for OA metrics
1325 *
1326 * Undoes anything oa_get_render_ctx_id() did to keep the context HW ID valid
1327 * for the lifetime of the stream.
1328 */
Robert Braggd7965152016-11-07 19:49:52 +00001329static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
1330{
1331 struct drm_i915_private *dev_priv = stream->dev_priv;
Chris Wilson1fc44d92018-05-17 22:26:32 +01001332 struct intel_context *ce;
Robert Braggd7965152016-11-07 19:49:52 +00001333
Chris Wilson1fc44d92018-05-17 22:26:32 +01001334 dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
Lionel Landwerlin61d56762018-06-02 12:29:46 +01001335 dev_priv->perf.oa.specific_ctx_id_mask = 0;
Robert Braggd7965152016-11-07 19:49:52 +00001336
Chris Wilson1fc44d92018-05-17 22:26:32 +01001337 ce = fetch_and_zero(&dev_priv->perf.oa.pinned_ctx);
1338 if (ce) {
Robert Bragg19f81df2017-06-13 12:23:03 +01001339 mutex_lock(&dev_priv->drm.struct_mutex);
Chris Wilson1fc44d92018-05-17 22:26:32 +01001340 intel_context_unpin(ce);
Robert Bragg19f81df2017-06-13 12:23:03 +01001341 mutex_unlock(&dev_priv->drm.struct_mutex);
1342 }
Robert Braggd7965152016-11-07 19:49:52 +00001343}
1344
1345static void
1346free_oa_buffer(struct drm_i915_private *i915)
1347{
1348 mutex_lock(&i915->drm.struct_mutex);
1349
Chris Wilson6a2f59e2018-07-21 13:50:37 +01001350 i915_vma_unpin_and_release(&i915->perf.oa.oa_buffer.vma,
1351 I915_VMA_RELEASE_MAP);
Robert Braggd7965152016-11-07 19:49:52 +00001352
1353 mutex_unlock(&i915->drm.struct_mutex);
Chris Wilson6a2f59e2018-07-21 13:50:37 +01001354
1355 i915->perf.oa.oa_buffer.vaddr = NULL;
Robert Braggd7965152016-11-07 19:49:52 +00001356}
1357
1358static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
1359{
1360 struct drm_i915_private *dev_priv = stream->dev_priv;
1361
1362 BUG_ON(stream != dev_priv->perf.oa.exclusive_stream);
1363
Robert Bragg19f81df2017-06-13 12:23:03 +01001364 /*
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01001365 * Unset exclusive_stream first, it will be checked while disabling
1366 * the metric set on gen8+.
Robert Bragg19f81df2017-06-13 12:23:03 +01001367 */
Lionel Landwerlin701f8232017-08-03 17:58:08 +01001368 mutex_lock(&dev_priv->drm.struct_mutex);
Robert Bragg19f81df2017-06-13 12:23:03 +01001369 dev_priv->perf.oa.exclusive_stream = NULL;
Robert Braggd7965152016-11-07 19:49:52 +00001370 dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
Lionel Landwerlin41d3fdc2018-03-01 11:06:13 +00001371 mutex_unlock(&dev_priv->drm.struct_mutex);
Robert Braggd7965152016-11-07 19:49:52 +00001372
1373 free_oa_buffer(dev_priv);
1374
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07001375 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
Chris Wilson6619c002019-01-14 14:21:15 +00001376 intel_runtime_pm_put(dev_priv, stream->wakeref);
Robert Braggd7965152016-11-07 19:49:52 +00001377
1378 if (stream->ctx)
1379 oa_put_render_ctx_id(stream);
1380
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01001381 put_oa_config(dev_priv, stream->oa_config);
1382
Robert Bragg712122e2017-05-11 16:43:31 +01001383 if (dev_priv->perf.oa.spurious_report_rs.missed) {
1384 DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
1385 dev_priv->perf.oa.spurious_report_rs.missed);
1386 }
Robert Braggd7965152016-11-07 19:49:52 +00001387}
1388
1389static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv)
1390{
1391 u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
Robert Bragg0dd860c2017-05-11 16:43:28 +01001392 unsigned long flags;
1393
1394 spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
Robert Braggd7965152016-11-07 19:49:52 +00001395
1396 /* Pre-DevBDW: OABUFFER must be set with counters off,
1397 * before OASTATUS1, but after OASTATUS2
1398 */
Lionel Landwerlinb82ed432018-03-26 10:08:26 +01001399 I915_WRITE(GEN7_OASTATUS2,
1400 gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT); /* head */
Robert Braggf2790202017-05-11 16:43:26 +01001401 dev_priv->perf.oa.oa_buffer.head = gtt_offset;
1402
Robert Braggd7965152016-11-07 19:49:52 +00001403 I915_WRITE(GEN7_OABUFFER, gtt_offset);
Robert Braggf2790202017-05-11 16:43:26 +01001404
Joonas Lahtinenfe841682018-11-16 15:55:09 +02001405 I915_WRITE(GEN7_OASTATUS1, gtt_offset | OABUFFER_SIZE_16M); /* tail */
Robert Braggd7965152016-11-07 19:49:52 +00001406
Robert Bragg0dd860c2017-05-11 16:43:28 +01001407 /* Mark that we need updated tail pointers to read from... */
1408 dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
1409 dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
1410
1411 spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
1412
Robert Braggd7965152016-11-07 19:49:52 +00001413 /* On Haswell we have to track which OASTATUS1 flags we've
1414 * already seen since they can't be cleared while periodic
1415 * sampling is enabled.
1416 */
1417 dev_priv->perf.oa.gen7_latched_oastatus1 = 0;
1418
1419 /* NB: although the OA buffer will initially be allocated
1420 * zeroed via shmfs (and so this memset is redundant when
1421 * first allocating), we may re-init the OA buffer, either
1422 * when re-enabling a stream or in error/reset paths.
1423 *
1424 * The reason we clear the buffer for each re-init is for the
1425 * sanity check in gen7_append_oa_reports() that looks at the
1426 * report-id field to make sure it's non-zero which relies on
1427 * the assumption that new reports are being written to zeroed
1428 * memory...
1429 */
Joonas Lahtinenfe841682018-11-16 15:55:09 +02001430 memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
Robert Braggd7965152016-11-07 19:49:52 +00001431
1432 /* Maybe make ->pollin per-stream state if we support multiple
1433 * concurrent streams in the future.
1434 */
1435 dev_priv->perf.oa.pollin = false;
1436}
1437
Robert Bragg19f81df2017-06-13 12:23:03 +01001438static void gen8_init_oa_buffer(struct drm_i915_private *dev_priv)
1439{
1440 u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
1441 unsigned long flags;
1442
1443 spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
1444
1445 I915_WRITE(GEN8_OASTATUS, 0);
1446 I915_WRITE(GEN8_OAHEADPTR, gtt_offset);
1447 dev_priv->perf.oa.oa_buffer.head = gtt_offset;
1448
1449 I915_WRITE(GEN8_OABUFFER_UDW, 0);
1450
1451 /*
1452 * PRM says:
1453 *
1454 * "This MMIO must be set before the OATAILPTR
1455 * register and after the OAHEADPTR register. This is
1456 * to enable proper functionality of the overflow
1457 * bit."
1458 */
1459 I915_WRITE(GEN8_OABUFFER, gtt_offset |
Joonas Lahtinenfe841682018-11-16 15:55:09 +02001460 OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
Robert Bragg19f81df2017-06-13 12:23:03 +01001461 I915_WRITE(GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
1462
1463 /* Mark that we need updated tail pointers to read from... */
1464 dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
1465 dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
1466
1467 /*
1468 * Reset state used to recognise context switches, affecting which
1469 * reports we will forward to userspace while filtering for a single
1470 * context.
1471 */
1472 dev_priv->perf.oa.oa_buffer.last_ctx_id = INVALID_CTX_ID;
1473
1474 spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
1475
1476 /*
1477 * NB: although the OA buffer will initially be allocated
1478 * zeroed via shmfs (and so this memset is redundant when
1479 * first allocating), we may re-init the OA buffer, either
1480 * when re-enabling a stream or in error/reset paths.
1481 *
1482 * The reason we clear the buffer for each re-init is for the
1483 * sanity check in gen8_append_oa_reports() that looks at the
1484 * reason field to make sure it's non-zero which relies on
1485 * the assumption that new reports are being written to zeroed
1486 * memory...
1487 */
Joonas Lahtinenfe841682018-11-16 15:55:09 +02001488 memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
Robert Bragg19f81df2017-06-13 12:23:03 +01001489
1490 /*
1491 * Maybe make ->pollin per-stream state if we support multiple
1492 * concurrent streams in the future.
1493 */
1494 dev_priv->perf.oa.pollin = false;
1495}
1496
Joonas Lahtinenfe841682018-11-16 15:55:09 +02001497static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
Robert Braggd7965152016-11-07 19:49:52 +00001498{
1499 struct drm_i915_gem_object *bo;
1500 struct i915_vma *vma;
1501 int ret;
1502
1503 if (WARN_ON(dev_priv->perf.oa.oa_buffer.vma))
1504 return -ENODEV;
1505
1506 ret = i915_mutex_lock_interruptible(&dev_priv->drm);
1507 if (ret)
1508 return ret;
1509
Joonas Lahtinenfe841682018-11-16 15:55:09 +02001510 BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
1511 BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);
1512
1513 bo = i915_gem_object_create(dev_priv, OA_BUFFER_SIZE);
Robert Braggd7965152016-11-07 19:49:52 +00001514 if (IS_ERR(bo)) {
1515 DRM_ERROR("Failed to allocate OA buffer\n");
1516 ret = PTR_ERR(bo);
1517 goto unlock;
1518 }
1519
Chris Wilsona679f582019-03-21 16:19:07 +00001520 i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);
Robert Braggd7965152016-11-07 19:49:52 +00001521
1522 /* PreHSW required 512K alignment, HSW requires 16M */
1523 vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
1524 if (IS_ERR(vma)) {
1525 ret = PTR_ERR(vma);
1526 goto err_unref;
1527 }
1528 dev_priv->perf.oa.oa_buffer.vma = vma;
1529
1530 dev_priv->perf.oa.oa_buffer.vaddr =
1531 i915_gem_object_pin_map(bo, I915_MAP_WB);
1532 if (IS_ERR(dev_priv->perf.oa.oa_buffer.vaddr)) {
1533 ret = PTR_ERR(dev_priv->perf.oa.oa_buffer.vaddr);
1534 goto err_unpin;
1535 }
1536
Joonas Lahtinenfe841682018-11-16 15:55:09 +02001537 DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n",
Robert Braggd7965152016-11-07 19:49:52 +00001538 i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma),
Joonas Lahtinenfe841682018-11-16 15:55:09 +02001539 dev_priv->perf.oa.oa_buffer.vaddr);
Robert Braggd7965152016-11-07 19:49:52 +00001540
1541 goto unlock;
1542
1543err_unpin:
1544 __i915_vma_unpin(vma);
1545
1546err_unref:
1547 i915_gem_object_put(bo);
1548
1549 dev_priv->perf.oa.oa_buffer.vaddr = NULL;
1550 dev_priv->perf.oa.oa_buffer.vma = NULL;
1551
1552unlock:
1553 mutex_unlock(&dev_priv->drm.struct_mutex);
1554 return ret;
1555}
1556
1557static void config_oa_regs(struct drm_i915_private *dev_priv,
1558 const struct i915_oa_reg *regs,
Lionel Landwerlin701f8232017-08-03 17:58:08 +01001559 u32 n_regs)
Robert Braggd7965152016-11-07 19:49:52 +00001560{
Lionel Landwerlin701f8232017-08-03 17:58:08 +01001561 u32 i;
Robert Braggd7965152016-11-07 19:49:52 +00001562
1563 for (i = 0; i < n_regs; i++) {
1564 const struct i915_oa_reg *reg = regs + i;
1565
1566 I915_WRITE(reg->addr, reg->value);
1567 }
1568}
1569
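/*
 * A minimal sketch of driving config_oa_regs() directly, assuming only the
 * (addr, value) pair layout used above; the register offset and data are
 * invented for illustration. Real configurations come from the validated
 * i915_oa_config attached to the stream, not hand-rolled tables like this:
 *
 *   static const struct i915_oa_reg example_regs[] = {
 *           { _MMIO(0x9888), 0x11810000 },
 *           { _MMIO(0x9888), 0x07784000 },
 *   };
 *
 *   config_oa_regs(dev_priv, example_regs, ARRAY_SIZE(example_regs));
 */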
Lionel Landwerlin5728de22018-10-23 11:07:06 +01001570static int hsw_enable_metric_set(struct i915_perf_stream *stream)
Robert Braggd7965152016-11-07 19:49:52 +00001571{
Lionel Landwerlin5728de22018-10-23 11:07:06 +01001572 struct drm_i915_private *dev_priv = stream->dev_priv;
1573 const struct i915_oa_config *oa_config = stream->oa_config;
1574
Robert Braggd7965152016-11-07 19:49:52 +00001575 /* PRM:
1576 *
1577 * OA unit is using “crclk” for its functionality. When trunk
1578 * level clock gating takes place, OA clock would be gated,
1579 * unable to count the events from non-render clock domain.
1580 * Render clock gating must be disabled when OA is enabled to
1581 * count the events from non-render domain. Unit level clock
1582 * gating for RCS should also be disabled.
1583 */
1584 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
1585 ~GEN7_DOP_CLOCK_GATE_ENABLE));
1586 I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
1587 GEN6_CSUNIT_CLOCK_GATE_DISABLE));
1588
Lionel Landwerlin701f8232017-08-03 17:58:08 +01001589 config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
Robert Braggd7965152016-11-07 19:49:52 +00001590
1591 /* It apparently takes a fairly long time for a new MUX
1592 * configuration to be applied after these register writes.
1593 * This delay duration was derived empirically based on the
1594 * render_basic config but hopefully it covers the maximum
1595 * configuration latency.
1596 *
1597 * As a fallback, the checks in _append_oa_reports() to skip
1598 * invalid OA reports do also seem to work to discard reports
1599 * generated before this config has completed - albeit not
1600 * silently.
1601 *
1602 * Unfortunately this is essentially a magic number, since we
1603 * don't currently know of a reliable mechanism for predicting
1604 * how long the MUX config will take to apply and besides
1605 * seeing invalid reports we don't know of a reliable way to
1606 * explicitly check that the MUX config has landed.
1607 *
1608 * It's even possible we've mischaracterized the underlying
1609 * problem - it just seems like the simplest explanation why
1610 * a delay at this location would mitigate any invalid reports.
1611 */
1612 usleep_range(15000, 20000);
1613
Lionel Landwerlin701f8232017-08-03 17:58:08 +01001614 config_oa_regs(dev_priv, oa_config->b_counter_regs,
1615 oa_config->b_counter_regs_len);
Robert Braggd7965152016-11-07 19:49:52 +00001616
1617 return 0;
1618}
1619
1620static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
1621{
1622 I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) &
1623 ~GEN6_CSUNIT_CLOCK_GATE_DISABLE));
1624 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) |
1625 GEN7_DOP_CLOCK_GATE_ENABLE));
1626
1627 I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
1628 ~GT_NOA_ENABLE));
1629}
1630
Robert Bragg19f81df2017-06-13 12:23:03 +01001631/*
1632 * NB: It must always remain pointer safe to run this even if the OA unit
1633 * has been disabled.
1634 *
1635 * It's fine to put out-of-date values into these per-context registers
1636 * in the case that the OA unit has been disabled.
1637 */
Chris Wilsonb146e5e2019-03-06 08:47:04 +00001638static void
1639gen8_update_reg_state_unlocked(struct intel_context *ce,
1640 u32 *reg_state,
1641 const struct i915_oa_config *oa_config)
Robert Bragg19f81df2017-06-13 12:23:03 +01001642{
Chris Wilsonb146e5e2019-03-06 08:47:04 +00001643 struct drm_i915_private *i915 = ce->gem_context->i915;
1644 u32 ctx_oactxctrl = i915->perf.oa.ctx_oactxctrl_offset;
1645 u32 ctx_flexeu0 = i915->perf.oa.ctx_flexeu0_offset;
Robert Bragg19f81df2017-06-13 12:23:03 +01001646 /* The MMIO offsets for Flex EU registers aren't contiguous */
Lionel Landwerlin35ab4fd2018-08-13 09:02:18 +01001647 i915_reg_t flex_regs[] = {
1648 EU_PERF_CNTL0,
1649 EU_PERF_CNTL1,
1650 EU_PERF_CNTL2,
1651 EU_PERF_CNTL3,
1652 EU_PERF_CNTL4,
1653 EU_PERF_CNTL5,
1654 EU_PERF_CNTL6,
Robert Bragg19f81df2017-06-13 12:23:03 +01001655 };
1656 int i;
1657
Lionel Landwerlin35ab4fd2018-08-13 09:02:18 +01001658 CTX_REG(reg_state, ctx_oactxctrl, GEN8_OACTXCONTROL,
Chris Wilsonb146e5e2019-03-06 08:47:04 +00001659 (i915->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
1660 (i915->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
Lionel Landwerlin35ab4fd2018-08-13 09:02:18 +01001661 GEN8_OA_COUNTER_RESUME);
Robert Bragg19f81df2017-06-13 12:23:03 +01001662
Lionel Landwerlin35ab4fd2018-08-13 09:02:18 +01001663 for (i = 0; i < ARRAY_SIZE(flex_regs); i++) {
Robert Bragg19f81df2017-06-13 12:23:03 +01001664 u32 state_offset = ctx_flexeu0 + i * 2;
Lionel Landwerlin35ab4fd2018-08-13 09:02:18 +01001665 u32 mmio = i915_mmio_reg_offset(flex_regs[i]);
Robert Bragg19f81df2017-06-13 12:23:03 +01001666
1667 /*
1668 * This arbitrary default will select the 'EU FPU0 Pipeline
1669 * Active' event. In the future it's anticipated that there
1670 * will be an explicit 'No Event' we can select, but not yet...
1671 */
1672 u32 value = 0;
Robert Bragg19f81df2017-06-13 12:23:03 +01001673
Lionel Landwerlin701f8232017-08-03 17:58:08 +01001674 if (oa_config) {
1675 u32 j;
1676
1677 for (j = 0; j < oa_config->flex_regs_len; j++) {
1678 if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) {
1679 value = oa_config->flex_regs[j].value;
1680 break;
1681 }
Robert Bragg19f81df2017-06-13 12:23:03 +01001682 }
1683 }
1684
Lionel Landwerlin35ab4fd2018-08-13 09:02:18 +01001685 CTX_REG(reg_state, state_offset, flex_regs[i], value);
Robert Bragg19f81df2017-06-13 12:23:03 +01001686 }
Lionel Landwerlinec431ea2019-02-05 09:50:29 +00001687
Chris Wilsonb146e5e2019-03-06 08:47:04 +00001688 CTX_REG(reg_state,
1689 CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
Chris Wilson09407572019-04-24 10:51:34 +01001690 intel_sseu_make_rpcs(i915, &ce->sseu));
Robert Bragg19f81df2017-06-13 12:23:03 +01001691}
1692
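/*
 * For reference, CTX_REG() lays a (register, value) pair out as two
 * consecutive dwords in the context image, which is why the flex EU
 * offsets above step by two. A sketch of its effect (not the literal
 * macro definition):
 *
 *   reg_state[state_offset + 0] = i915_mmio_reg_offset(flex_regs[i]);
 *   reg_state[state_offset + 1] = value;
 */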
1693/*
Robert Bragg19f81df2017-06-13 12:23:03 +01001694 * Manages updating the per-context aspects of the OA stream
1695 * configuration across all contexts.
1696 *
1697 * The awkward consideration here is that OACTXCONTROL controls the
1698 * exponent for periodic sampling which is primarily used for system
1699 * wide profiling where we'd like a consistent sampling period even in
1700 * the face of context switches.
1701 *
1702 * Our approach of updating the register state context (as opposed to
1703 * say using a workaround batch buffer) ensures that the hardware
1704 * won't automatically reload an out-of-date timer exponent even
1705 * transiently before a WA BB could be parsed.
1706 *
1707 * This function needs to:
1708 * - Ensure the currently running context's per-context OA state is
1709 * updated
1710 * - Ensure that all existing contexts will have the correct per-context
1711 * OA state if they are scheduled for use.
1712 * - Ensure any new contexts will be initialized with the correct
1713 * per-context OA state.
1714 *
1715 * Note: it's only the RCS/Render context that has any OA state.
1716 */
1717static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
Lionel Landwerlin41d3fdc2018-03-01 11:06:13 +00001718 const struct i915_oa_config *oa_config)
Robert Bragg19f81df2017-06-13 12:23:03 +01001719{
Chris Wilson8a68d462019-03-05 18:03:30 +00001720 struct intel_engine_cs *engine = dev_priv->engine[RCS0];
Chris Wilson666424a2018-09-14 13:35:04 +01001721 unsigned int map_type = i915_coherent_map_type(dev_priv);
Robert Bragg19f81df2017-06-13 12:23:03 +01001722 struct i915_gem_context *ctx;
Tvrtko Ursulin722f3de2018-09-12 16:29:30 +01001723 struct i915_request *rq;
Robert Bragg19f81df2017-06-13 12:23:03 +01001724 int ret;
Robert Bragg19f81df2017-06-13 12:23:03 +01001725
Lionel Landwerlin41d3fdc2018-03-01 11:06:13 +00001726 lockdep_assert_held(&dev_priv->drm.struct_mutex);
Robert Bragg19f81df2017-06-13 12:23:03 +01001727
Robert Bragg19f81df2017-06-13 12:23:03 +01001728 /*
1729 * The OA register config is setup through the context image. This image
1730 * might be written to by the GPU on context switch (in particular on
1731 * lite-restore). This means we can't safely update a context's image,
1732 * if this context is scheduled/submitted to run on the GPU.
1733 *
1734 * We could emit the OA register config through the batch buffer but
1735 * this might leave a small interval of time where the OA unit is
1736 * configured at an invalid sampling period.
1737 *
1738 * So far the best way to work around this issue seems to be draining
1739 * the GPU from any submitted work.
1740 */
Chris Wilsonec625fb2018-07-09 13:20:42 +01001741 ret = i915_gem_wait_for_idle(dev_priv,
Tvrtko Ursulin722f3de2018-09-12 16:29:30 +01001742 I915_WAIT_LOCKED,
Chris Wilsonec625fb2018-07-09 13:20:42 +01001743 MAX_SCHEDULE_TIMEOUT);
Robert Bragg19f81df2017-06-13 12:23:03 +01001744 if (ret)
Lionel Landwerlin1c71bc52018-08-13 09:02:17 +01001745 return ret;
Robert Bragg19f81df2017-06-13 12:23:03 +01001746
1747 /* Update all contexts now that we've stalled the submission. */
Chris Wilson829a0af2017-06-20 12:05:45 +01001748 list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
Chris Wilsonc4d52fe2019-03-08 13:25:19 +00001749 struct intel_context *ce = intel_context_lookup(ctx, engine);
Robert Bragg19f81df2017-06-13 12:23:03 +01001750 u32 *regs;
1751
1752 /* OA settings will be set upon first use */
Chris Wilsonc4d52fe2019-03-08 13:25:19 +00001753 if (!ce || !ce->state)
Robert Bragg19f81df2017-06-13 12:23:03 +01001754 continue;
1755
Chris Wilson666424a2018-09-14 13:35:04 +01001756 regs = i915_gem_object_pin_map(ce->state->obj, map_type);
Lionel Landwerlin1c71bc52018-08-13 09:02:17 +01001757 if (IS_ERR(regs))
1758 return PTR_ERR(regs);
Robert Bragg19f81df2017-06-13 12:23:03 +01001759
1760 ce->state->obj->mm.dirty = true;
1761 regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);
1762
Chris Wilsonb146e5e2019-03-06 08:47:04 +00001763 gen8_update_reg_state_unlocked(ce, regs, oa_config);
Robert Bragg19f81df2017-06-13 12:23:03 +01001764
1765 i915_gem_object_unpin_map(ce->state->obj);
1766 }
1767
Tvrtko Ursulin722f3de2018-09-12 16:29:30 +01001768 /*
1769 * Apply the configuration by doing one context restore of the edited
1770 * context image.
1771 */
Chris Wilson2ccdf6a2019-04-24 21:07:16 +01001772 rq = i915_request_create(engine->kernel_context);
Tvrtko Ursulin722f3de2018-09-12 16:29:30 +01001773 if (IS_ERR(rq))
1774 return PTR_ERR(rq);
1775
1776 i915_request_add(rq);
1777
1778 return 0;
Robert Bragg19f81df2017-06-13 12:23:03 +01001779}
1780
Lionel Landwerlin5728de22018-10-23 11:07:06 +01001781static int gen8_enable_metric_set(struct i915_perf_stream *stream)
Robert Bragg19f81df2017-06-13 12:23:03 +01001782{
Lionel Landwerlin5728de22018-10-23 11:07:06 +01001783 struct drm_i915_private *dev_priv = stream->dev_priv;
1784 const struct i915_oa_config *oa_config = stream->oa_config;
Lionel Landwerlin701f8232017-08-03 17:58:08 +01001785 int ret;
Robert Bragg19f81df2017-06-13 12:23:03 +01001786
1787 /*
1788 * We disable slice/unslice clock ratio change reports on SKL since
1789 * they are too noisy. The HW generates a lot of redundant reports
1790 * where the ratio hasn't really changed causing a lot of redundant
1791 * work to processes and increasing the chances we'll hit buffer
1792 * overruns.
1793 *
1794 * Although we don't currently use the 'disable overrun' OABUFFER
1795 * feature it's worth noting that clock ratio reports have to be
1796 * disabled before considering to use that feature since the HW doesn't
1797 * correctly block these reports.
1798 *
1799 * Currently none of the high-level metrics we have depend on knowing
1800 * this ratio to normalize.
1801 *
1802 * Note: This register is not power context saved and restored, but
1803 * that's OK considering that we disable RC6 while the OA unit is
1804 * enabled.
1805 *
1806 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to
1807 * be read back from automatically triggered reports, as part of the
1808 * RPT_ID field.
1809 */
Lucas De Marchi00690002018-12-12 10:10:42 -08001810 if (IS_GEN_RANGE(dev_priv, 9, 11)) {
Robert Bragg19f81df2017-06-13 12:23:03 +01001811 I915_WRITE(GEN8_OA_DEBUG,
1812 _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
1813 GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
1814 }
1815
1816 /*
1817 * Update all contexts prior writing the mux configurations as we need
1818 * to make sure all slices/subslices are ON before writing to NOA
1819 * registers.
1820 */
Lionel Landwerlin41d3fdc2018-03-01 11:06:13 +00001821 ret = gen8_configure_all_contexts(dev_priv, oa_config);
Robert Bragg19f81df2017-06-13 12:23:03 +01001822 if (ret)
1823 return ret;
1824
Lionel Landwerlin701f8232017-08-03 17:58:08 +01001825 config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
1826
Lionel Landwerlin701f8232017-08-03 17:58:08 +01001827 config_oa_regs(dev_priv, oa_config->b_counter_regs,
1828 oa_config->b_counter_regs_len);
Robert Bragg19f81df2017-06-13 12:23:03 +01001829
1830 return 0;
1831}
1832
1833static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
1834{
1835 /* Reset all contexts' slices/subslices configurations. */
Lionel Landwerlin41d3fdc2018-03-01 11:06:13 +00001836 gen8_configure_all_contexts(dev_priv, NULL);
Lionel Landwerlin28964cf2017-08-03 17:58:10 +01001837
1838 I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
1839 ~GT_NOA_ENABLE));
Robert Bragg19f81df2017-06-13 12:23:03 +01001840}
1841
Lionel Landwerlin95690a02017-11-10 19:08:43 +00001842static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
1843{
1844 /* Reset all contexts' slices/subslices configurations. */
Lionel Landwerlin41d3fdc2018-03-01 11:06:13 +00001845 gen8_configure_all_contexts(dev_priv, NULL);
Lionel Landwerlin95690a02017-11-10 19:08:43 +00001846
1847 /* Make sure we disable noa to save power. */
1848 I915_WRITE(RPM_CONFIG1,
1849 I915_READ(RPM_CONFIG1) & ~GEN10_GT_NOA_ENABLE);
1850}
1851
Lionel Landwerlin5728de22018-10-23 11:07:06 +01001852static void gen7_oa_enable(struct i915_perf_stream *stream)
Robert Braggd7965152016-11-07 19:49:52 +00001853{
Lionel Landwerlin5728de22018-10-23 11:07:06 +01001854 struct drm_i915_private *dev_priv = stream->dev_priv;
1855 struct i915_gem_context *ctx = stream->ctx;
Lionel Landwerlin11051302018-03-26 10:08:23 +01001856 u32 ctx_id = dev_priv->perf.oa.specific_ctx_id;
1857 bool periodic = dev_priv->perf.oa.periodic;
1858 u32 period_exponent = dev_priv->perf.oa.period_exponent;
1859 u32 report_format = dev_priv->perf.oa.oa_buffer.format;
1860
Robert Bragg1bef3402017-06-13 12:23:06 +01001861 /*
1862 * Reset buf pointers so we don't forward reports from before now.
1863 *
1864 * Think carefully if considering trying to avoid this, since it
1865 * also ensures status flags and the buffer itself are cleared
1866 * in error paths, and we have checks for invalid reports based
1867 * on the assumption that certain fields are written to zeroed
1868 * memory, which this helps maintain.
1869 */
1870 gen7_init_oa_buffer(dev_priv);
Robert Braggd7965152016-11-07 19:49:52 +00001871
Lionel Landwerlin11051302018-03-26 10:08:23 +01001872 I915_WRITE(GEN7_OACONTROL,
1873 (ctx_id & GEN7_OACONTROL_CTX_MASK) |
1874 (period_exponent <<
1875 GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
1876 (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
1877 (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
1878 (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
1879 GEN7_OACONTROL_ENABLE);
Robert Braggd7965152016-11-07 19:49:52 +00001880}
1881
Lionel Landwerlin5728de22018-10-23 11:07:06 +01001882static void gen8_oa_enable(struct i915_perf_stream *stream)
Robert Bragg19f81df2017-06-13 12:23:03 +01001883{
Lionel Landwerlin5728de22018-10-23 11:07:06 +01001884 struct drm_i915_private *dev_priv = stream->dev_priv;
Robert Bragg19f81df2017-06-13 12:23:03 +01001885 u32 report_format = dev_priv->perf.oa.oa_buffer.format;
1886
1887 /*
1888 * Reset buf pointers so we don't forward reports from before now.
1889 *
1890 * Think carefully if considering trying to avoid this, since it
1891 * also ensures status flags and the buffer itself are cleared
1892 * in error paths, and we have checks for invalid reports based
1893 * on the assumption that certain fields are written to zeroed
1894 * memory, which this helps maintain.
1895 */
1896 gen8_init_oa_buffer(dev_priv);
1897
1898 /*
1899 * Note: we don't rely on the hardware to perform single context
1900 * filtering and instead filter on the cpu based on the context-id
1901 * field of reports
1902 */
1903 I915_WRITE(GEN8_OACONTROL, (report_format <<
1904 GEN8_OA_REPORT_FORMAT_SHIFT) |
1905 GEN8_OA_COUNTER_ENABLE);
1906}
1907
Robert Bragg16d98b32016-12-07 21:40:33 +00001908/**
1909 * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream
1910 * @stream: An i915 perf stream opened for OA metrics
1911 *
1912 * [Re]enables hardware periodic sampling according to the period configured
1913 * when opening the stream. This also starts a hrtimer that will periodically
1914 * check for data in the circular OA buffer for notifying userspace (e.g.
1915 * during a read() or poll()).
1916 */
Robert Braggd7965152016-11-07 19:49:52 +00001917static void i915_oa_stream_enable(struct i915_perf_stream *stream)
1918{
1919 struct drm_i915_private *dev_priv = stream->dev_priv;
1920
Lionel Landwerlin5728de22018-10-23 11:07:06 +01001921 dev_priv->perf.oa.ops.oa_enable(stream);
Robert Braggd7965152016-11-07 19:49:52 +00001922
1923 if (dev_priv->perf.oa.periodic)
1924 hrtimer_start(&dev_priv->perf.oa.poll_check_timer,
1925 ns_to_ktime(POLL_PERIOD),
1926 HRTIMER_MODE_REL_PINNED);
1927}
1928
Lionel Landwerlin5728de22018-10-23 11:07:06 +01001929static void gen7_oa_disable(struct i915_perf_stream *stream)
Robert Braggd7965152016-11-07 19:49:52 +00001930{
Daniele Ceraolo Spurio97a04e02019-03-25 14:49:39 -07001931 struct intel_uncore *uncore = &stream->dev_priv->uncore;
Lionel Landwerlin5728de22018-10-23 11:07:06 +01001932
Daniele Ceraolo Spurio97a04e02019-03-25 14:49:39 -07001933 intel_uncore_write(uncore, GEN7_OACONTROL, 0);
1934 if (intel_wait_for_register(uncore,
Chris Wilsone896d292018-05-11 14:52:07 +01001935 GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
1936 50))
1937 DRM_ERROR("wait for OA to be disabled timed out\n");
Robert Braggd7965152016-11-07 19:49:52 +00001938}
1939
Lionel Landwerlin5728de22018-10-23 11:07:06 +01001940static void gen8_oa_disable(struct i915_perf_stream *stream)
Robert Bragg19f81df2017-06-13 12:23:03 +01001941{
Daniele Ceraolo Spurio97a04e02019-03-25 14:49:39 -07001942 struct intel_uncore *uncore = &stream->dev_priv->uncore;
Lionel Landwerlin5728de22018-10-23 11:07:06 +01001943
Daniele Ceraolo Spurio97a04e02019-03-25 14:49:39 -07001944 intel_uncore_write(uncore, GEN8_OACONTROL, 0);
1945 if (intel_wait_for_register(uncore,
Chris Wilsone896d292018-05-11 14:52:07 +01001946 GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
1947 50))
1948 DRM_ERROR("wait for OA to be disabled timed out\n");
Robert Bragg19f81df2017-06-13 12:23:03 +01001949}
1950
Robert Bragg16d98b32016-12-07 21:40:33 +00001951/**
1952 * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
1953 * @stream: An i915 perf stream opened for OA metrics
1954 *
1955 * Stops the OA unit from periodically writing counter reports into the
1956 * circular OA buffer. This also stops the hrtimer that periodically checks for
1957 * data in the circular OA buffer, for notifying userspace.
1958 */
Robert Braggd7965152016-11-07 19:49:52 +00001959static void i915_oa_stream_disable(struct i915_perf_stream *stream)
1960{
1961 struct drm_i915_private *dev_priv = stream->dev_priv;
1962
Lionel Landwerlin5728de22018-10-23 11:07:06 +01001963 dev_priv->perf.oa.ops.oa_disable(stream);
Robert Braggd7965152016-11-07 19:49:52 +00001964
1965 if (dev_priv->perf.oa.periodic)
1966 hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer);
1967}
1968
Robert Braggd7965152016-11-07 19:49:52 +00001969static const struct i915_perf_stream_ops i915_oa_stream_ops = {
1970 .destroy = i915_oa_stream_destroy,
1971 .enable = i915_oa_stream_enable,
1972 .disable = i915_oa_stream_disable,
1973 .wait_unlocked = i915_oa_wait_unlocked,
1974 .poll_wait = i915_oa_poll_wait,
1975 .read = i915_oa_read,
1976};
1977
Robert Bragg16d98b32016-12-07 21:40:33 +00001978/**
1979 * i915_oa_stream_init - validate combined props for OA stream and init
1980 * @stream: An i915 perf stream
1981 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
1982 * @props: The property state that configures stream (individually validated)
1983 *
1984 * While read_properties_unlocked() validates properties in isolation it
1985 * doesn't ensure that the combination necessarily makes sense.
1986 *
1987 * At this point it has been determined that userspace wants a stream of
1988 * OA metrics, but still we need to further validate the combined
1989 * properties are OK.
1990 *
1991 * If the configuration makes sense then we can allocate memory for
1992 * a circular OA buffer and apply the requested metric set configuration.
1993 *
1994 * Returns: zero on success or a negative error code.
1995 */
Robert Braggd7965152016-11-07 19:49:52 +00001996static int i915_oa_stream_init(struct i915_perf_stream *stream,
1997 struct drm_i915_perf_open_param *param,
1998 struct perf_open_properties *props)
1999{
2000 struct drm_i915_private *dev_priv = stream->dev_priv;
2001 int format_size;
2002 int ret;
2003
Robert Bragg442b8c02016-11-07 19:49:53 +00002004 /* If the sysfs metrics/ directory wasn't registered for some
2005 * reason then don't let userspace try their luck with config
2006 * IDs
2007 */
2008 if (!dev_priv->perf.metrics_kobj) {
Robert Bragg77085502016-12-01 17:21:52 +00002009 DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
Robert Bragg442b8c02016-11-07 19:49:53 +00002010 return -EINVAL;
2011 }
2012
Robert Braggd7965152016-11-07 19:49:52 +00002013 if (!(props->sample_flags & SAMPLE_OA_REPORT)) {
Robert Bragg77085502016-12-01 17:21:52 +00002014 DRM_DEBUG("Only OA report sampling supported\n");
Robert Braggd7965152016-11-07 19:49:52 +00002015 return -EINVAL;
2016 }
2017
Lionel Landwerlin784b1a82018-10-23 11:07:05 +01002018 if (!dev_priv->perf.oa.ops.enable_metric_set) {
Robert Bragg77085502016-12-01 17:21:52 +00002019 DRM_DEBUG("OA unit not supported\n");
Robert Braggd7965152016-11-07 19:49:52 +00002020 return -ENODEV;
2021 }
2022
2023 /* To avoid the complexity of having to accurately filter
2024 * counter reports and marshal to the appropriate client
2025 * we currently only allow exclusive access
2026 */
2027 if (dev_priv->perf.oa.exclusive_stream) {
Robert Bragg77085502016-12-01 17:21:52 +00002028 DRM_DEBUG("OA unit already in use\n");
Robert Braggd7965152016-11-07 19:49:52 +00002029 return -EBUSY;
2030 }
2031
Robert Braggd7965152016-11-07 19:49:52 +00002032 if (!props->oa_format) {
Robert Bragg77085502016-12-01 17:21:52 +00002033 DRM_DEBUG("OA report format not specified\n");
Robert Braggd7965152016-11-07 19:49:52 +00002034 return -EINVAL;
2035 }
2036
Robert Bragg712122e2017-05-11 16:43:31 +01002037 /* We set up some ratelimit state to potentially throttle any _NOTES
2038 * about spurious, invalid OA reports which we don't forward to
2039 * userspace.
2040 *
2041 * The initialization is associated with opening the stream (not driver
2042 * init) considering we print a _NOTE about any throttling when closing
2043 * the stream instead of waiting until driver _fini which no one would
2044 * ever see.
2045 *
2046 * Using the same limiting factors as printk_ratelimit()
2047 */
2048 ratelimit_state_init(&dev_priv->perf.oa.spurious_report_rs,
2049 5 * HZ, 10);
2050 /* Since we use a DRM_NOTE for spurious reports it would be
2051 * inconsistent to let __ratelimit() automatically print a warning for
2052 * throttling.
2053 */
2054 ratelimit_set_flags(&dev_priv->perf.oa.spurious_report_rs,
2055 RATELIMIT_MSG_ON_RELEASE);
2056
Robert Braggd7965152016-11-07 19:49:52 +00002057 stream->sample_size = sizeof(struct drm_i915_perf_record_header);
2058
2059 format_size = dev_priv->perf.oa.oa_formats[props->oa_format].size;
2060
2061 stream->sample_flags |= SAMPLE_OA_REPORT;
2062 stream->sample_size += format_size;
2063
2064 dev_priv->perf.oa.oa_buffer.format_size = format_size;
2065 if (WARN_ON(dev_priv->perf.oa.oa_buffer.format_size == 0))
2066 return -EINVAL;
2067
2068 dev_priv->perf.oa.oa_buffer.format =
2069 dev_priv->perf.oa.oa_formats[props->oa_format].format;
2070
Robert Braggd7965152016-11-07 19:49:52 +00002071 dev_priv->perf.oa.periodic = props->oa_periodic;
Robert Bragg0dd860c2017-05-11 16:43:28 +01002072 if (dev_priv->perf.oa.periodic)
Robert Braggd7965152016-11-07 19:49:52 +00002073 dev_priv->perf.oa.period_exponent = props->oa_period_exponent;
2074
Robert Braggd7965152016-11-07 19:49:52 +00002075 if (stream->ctx) {
2076 ret = oa_get_render_ctx_id(stream);
Lionel Landwerlin9bd9be62018-03-26 10:08:28 +01002077 if (ret) {
2078 DRM_DEBUG("Invalid context id to filter with\n");
Robert Braggd7965152016-11-07 19:49:52 +00002079 return ret;
Lionel Landwerlin9bd9be62018-03-26 10:08:28 +01002080 }
Robert Braggd7965152016-11-07 19:49:52 +00002081 }
2082
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01002083 ret = get_oa_config(dev_priv, props->metrics_set, &stream->oa_config);
Lionel Landwerlin9bd9be62018-03-26 10:08:28 +01002084 if (ret) {
2085 DRM_DEBUG("Invalid OA config id=%i\n", props->metrics_set);
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01002086 goto err_config;
Lionel Landwerlin9bd9be62018-03-26 10:08:28 +01002087 }
Lionel Landwerlin701f8232017-08-03 17:58:08 +01002088
Robert Braggd7965152016-11-07 19:49:52 +00002089 /* PRM - observability performance counters:
2090 *
2091 * OACONTROL, performance counter enable, note:
2092 *
2093 * "When this bit is set, in order to have coherent counts,
2094 * RC6 power state and trunk clock gating must be disabled.
2095 * This can be achieved by programming MMIO registers as
2096 * 0xA094=0 and 0xA090[31]=1"
2097 *
2098 * In our case we are expecting that taking pm + FORCEWAKE
2099 * references will effectively disable RC6.
2100 */
Chris Wilson6619c002019-01-14 14:21:15 +00002101 stream->wakeref = intel_runtime_pm_get(dev_priv);
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07002102 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
Robert Braggd7965152016-11-07 19:49:52 +00002103
Joonas Lahtinenfe841682018-11-16 15:55:09 +02002104 ret = alloc_oa_buffer(dev_priv);
sagar.a.kamble@intel.com987f8c42017-06-27 23:09:41 +05302105 if (ret)
2106 goto err_oa_buf_alloc;
2107
Lionel Landwerlin41d3fdc2018-03-01 11:06:13 +00002108 ret = i915_mutex_lock_interruptible(&dev_priv->drm);
2109 if (ret)
2110 goto err_lock;
2111
Lionel Landwerlinec431ea2019-02-05 09:50:29 +00002112 stream->ops = &i915_oa_stream_ops;
2113 dev_priv->perf.oa.exclusive_stream = stream;
2114
Lionel Landwerlin5728de22018-10-23 11:07:06 +01002115 ret = dev_priv->perf.oa.ops.enable_metric_set(stream);
Lionel Landwerlin9bd9be62018-03-26 10:08:28 +01002116 if (ret) {
2117 DRM_DEBUG("Unable to enable metric set\n");
Robert Braggd7965152016-11-07 19:49:52 +00002118 goto err_enable;
Lionel Landwerlin9bd9be62018-03-26 10:08:28 +01002119 }
Robert Braggd7965152016-11-07 19:49:52 +00002120
Lionel Landwerlin701f8232017-08-03 17:58:08 +01002121 mutex_unlock(&dev_priv->drm.struct_mutex);
2122
Robert Braggd7965152016-11-07 19:49:52 +00002123 return 0;
2124
2125err_enable:
Lionel Landwerlinec431ea2019-02-05 09:50:29 +00002126 dev_priv->perf.oa.exclusive_stream = NULL;
Lionel Landwerlin41d3fdc2018-03-01 11:06:13 +00002127 dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
2128 mutex_unlock(&dev_priv->drm.struct_mutex);
2129
2130err_lock:
Robert Braggd7965152016-11-07 19:49:52 +00002131 free_oa_buffer(dev_priv);
2132
2133err_oa_buf_alloc:
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01002134 put_oa_config(dev_priv, stream->oa_config);
2135
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07002136 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
Chris Wilson6619c002019-01-14 14:21:15 +00002137 intel_runtime_pm_put(dev_priv, stream->wakeref);
Lionel Landwerlinf89823c2017-08-03 18:05:50 +01002138
2139err_config:
Robert Braggd7965152016-11-07 19:49:52 +00002140 if (stream->ctx)
2141 oa_put_render_ctx_id(stream);
2142
2143 return ret;
2144}
2145
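/*
 * Hypothetical userspace sketch of opening an OA stream, using the
 * property and flag names from uapi i915_drm.h; the metrics set ID
 * (normally discovered via the sysfs metrics/ directory) and the
 * sampling exponent below are invented for illustration:
 *
 *   uint64_t props[] = {
 *           DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *           DRM_I915_PERF_PROP_OA_METRICS_SET, 1,
 *           DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *           DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *   };
 *   struct drm_i915_perf_open_param param = {
 *           .flags = I915_PERF_FLAG_FD_CLOEXEC,
 *           .num_properties = sizeof(props) / sizeof(props[0]) / 2,
 *           .properties_ptr = (uint64_t)(uintptr_t)props,
 *   };
 *   int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 * On success the ioctl returns a new stream file descriptor served by
 * the fops defined further below.
 */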
Robert Bragg19f81df2017-06-13 12:23:03 +01002146void i915_oa_init_reg_state(struct intel_engine_cs *engine,
Chris Wilsonb146e5e2019-03-06 08:47:04 +00002147 struct intel_context *ce,
2148 u32 *regs)
Robert Bragg19f81df2017-06-13 12:23:03 +01002149{
Chris Wilson28b6cb02017-08-10 18:57:43 +01002150 struct i915_perf_stream *stream;
Robert Bragg19f81df2017-06-13 12:23:03 +01002151
Chris Wilson8a68d462019-03-05 18:03:30 +00002152 if (engine->class != RENDER_CLASS)
Robert Bragg19f81df2017-06-13 12:23:03 +01002153 return;
2154
Chris Wilson28b6cb02017-08-10 18:57:43 +01002155 stream = engine->i915->perf.oa.exclusive_stream;
Lionel Landwerlin701f8232017-08-03 17:58:08 +01002156 if (stream)
Chris Wilsonb146e5e2019-03-06 08:47:04 +00002157 gen8_update_reg_state_unlocked(ce, regs, stream->oa_config);
Robert Bragg19f81df2017-06-13 12:23:03 +01002158}
2159
Robert Bragg16d98b32016-12-07 21:40:33 +00002160/**
2161 * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation
2162 * @stream: An i915 perf stream
2163 * @file: An i915 perf stream file
2164 * @buf: destination buffer given by userspace
2165 * @count: the number of bytes userspace wants to read
2166 * @ppos: (inout) file seek position (unused)
2167 *
2168 * Besides wrapping &i915_perf_stream_ops->read this provides a common place to
2169 * ensure that if we've successfully copied any data then reporting that takes
2170 * precedence over any internal error status, so the data isn't lost.
2171 *
2172 * For example ret will be -ENOSPC whenever there is more buffered data than
2173 * can be copied to userspace, but that's only interesting if we weren't able
2174 * to copy some data because it implies the userspace buffer is too small to
2175 * receive a single record (and we never split records).
2176 *
2177 * Another case with ret == -EFAULT is more of a grey area since it would seem
2178 * like bad form for userspace to ask us to overrun its buffer, but the user
2179 * knows best:
2180 *
2181 * http://yarchive.net/comp/linux/partial_reads_writes.html
2182 *
2183 * Returns: The number of bytes copied or a negative error code on failure.
2184 */
Robert Braggeec688e2016-11-07 19:49:47 +00002185static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream,
2186 struct file *file,
2187 char __user *buf,
2188 size_t count,
2189 loff_t *ppos)
2190{
2191 /* Note we keep the offset (aka bytes read) separate from any
2192 * error status so that the final check for whether we return
2193 * the bytes read with a higher precedence than any error (see
2194 * comment below) doesn't need to be handled/duplicated in
2195 * stream->ops->read() implementations.
2196 */
2197 size_t offset = 0;
2198 int ret = stream->ops->read(stream, buf, count, &offset);
2199
Robert Braggeec688e2016-11-07 19:49:47 +00002200 return offset ?: (ret ?: -EAGAIN);
2201}
2202
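/*
 * The return expression above resolves case by case (byte counts
 * invented for illustration):
 *
 *   offset == 256, ret == -ENOSPC  ->  256     (copied data takes precedence)
 *   offset == 0,   ret == -EFAULT  ->  -EFAULT
 *   offset == 0,   ret == 0        ->  -EAGAIN (nothing available yet)
 */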
Robert Bragg16d98b32016-12-07 21:40:33 +00002203/**
2204 * i915_perf_read - handles read() FOP for i915 perf stream FDs
2205 * @file: An i915 perf stream file
2206 * @buf: destination buffer given by userspace
2207 * @count: the number of bytes userspace wants to read
2208 * @ppos: (inout) file seek position (unused)
2209 *
2210 * The entry point for handling a read() on a stream file descriptor from
2211 * userspace. Most of the work is left to i915_perf_read_locked() and
2212 * &i915_perf_stream_ops->read, but blocking reads are handled here to spare
2213 * stream implementations (of which we might have multiple later) the work.
2214 *
2215 * We can also consistently treat trying to read from a disabled stream
2216 * as an IO error so implementations can assume the stream is enabled
2217 * while reading.
2218 *
2219 * Returns: The number of bytes copied or a negative error code on failure.
2220 */
Robert Braggeec688e2016-11-07 19:49:47 +00002221static ssize_t i915_perf_read(struct file *file,
2222 char __user *buf,
2223 size_t count,
2224 loff_t *ppos)
2225{
2226 struct i915_perf_stream *stream = file->private_data;
2227 struct drm_i915_private *dev_priv = stream->dev_priv;
2228 ssize_t ret;
2229
Robert Braggd7965152016-11-07 19:49:52 +00002230 /* To ensure it's handled consistently we simply treat all reads of a
2231 * disabled stream as an error. In particular it might otherwise lead
2232 * to a deadlock for blocking file descriptors...
2233 */
2234 if (!stream->enabled)
2235 return -EIO;
2236
Robert Braggeec688e2016-11-07 19:49:47 +00002237 if (!(file->f_flags & O_NONBLOCK)) {
Robert Braggd7965152016-11-07 19:49:52 +00002238 /* There's the small chance of false positives from
2239 * stream->ops->wait_unlocked.
2240 *
2241 * E.g. with single context filtering since we only wait until
2242 * oabuffer has >= 1 report we don't immediately know whether
2243 * any reports really belong to the current context
Robert Braggeec688e2016-11-07 19:49:47 +00002244 */
2245 do {
2246 ret = stream->ops->wait_unlocked(stream);
2247 if (ret)
2248 return ret;
2249
2250 mutex_lock(&dev_priv->perf.lock);
2251 ret = i915_perf_read_locked(stream, file,
2252 buf, count, ppos);
2253 mutex_unlock(&dev_priv->perf.lock);
2254 } while (ret == -EAGAIN);
2255 } else {
2256 mutex_lock(&dev_priv->perf.lock);
2257 ret = i915_perf_read_locked(stream, file, buf, count, ppos);
2258 mutex_unlock(&dev_priv->perf.lock);
2259 }
2260
Linus Torvaldsa9a08842018-02-11 14:34:03 -08002261 /* We allow the poll checking to sometimes report false positive EPOLLIN
Robert Bragg26ebd9c2017-05-11 16:43:25 +01002262 * events where we might actually report EAGAIN on read() if there's
2263 * not really any data available. In this situation though we don't
Linus Torvaldsa9a08842018-02-11 14:34:03 -08002264 * want to enter a busy loop between poll() reporting a EPOLLIN event
Robert Bragg26ebd9c2017-05-11 16:43:25 +01002265 * and read() returning -EAGAIN. Clearing the oa.pollin state here
2266 * effectively ensures we back off until the next hrtimer callback
Linus Torvaldsa9a08842018-02-11 14:34:03 -08002267 * before reporting another EPOLLIN event.
Robert Bragg26ebd9c2017-05-11 16:43:25 +01002268 */
2269 if (ret >= 0 || ret == -EAGAIN) {
Robert Braggd7965152016-11-07 19:49:52 +00002270 /* Maybe make ->pollin per-stream state if we support multiple
2271 * concurrent streams in the future.
2272 */
2273 dev_priv->perf.oa.pollin = false;
2274 }
2275
Robert Braggeec688e2016-11-07 19:49:47 +00002276 return ret;
2277}
2278
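/*
 * Hypothetical userspace read loop for a blocking stream fd, assuming the
 * record layout from uapi i915_drm.h (a drm_i915_perf_record_header
 * followed by a type-specific payload); the buffer size is arbitrary:
 *
 *   uint8_t buf[16 * 4096];
 *   ssize_t len = read(stream_fd, buf, sizeof(buf));
 *   size_t off = 0;
 *
 *   while (len > 0 && off < (size_t)len) {
 *           struct drm_i915_perf_record_header *header =
 *                   (struct drm_i915_perf_record_header *)(buf + off);
 *
 *           if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
 *                   process_oa_report(header + 1);
 *
 *           off += header->size;
 *   }
 *
 * header->size includes the header itself, so stepping by it walks whole
 * records (records are never split across read()s, see above), and
 * process_oa_report() stands in for whatever the application does with a
 * raw OA report.
 */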
Robert Braggd7965152016-11-07 19:49:52 +00002279static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
2280{
2281 struct drm_i915_private *dev_priv =
2282 container_of(hrtimer, typeof(*dev_priv),
2283 perf.oa.poll_check_timer);
2284
Robert Bragg19f81df2017-06-13 12:23:03 +01002285 if (oa_buffer_check_unlocked(dev_priv)) {
Robert Braggd7965152016-11-07 19:49:52 +00002286 dev_priv->perf.oa.pollin = true;
2287 wake_up(&dev_priv->perf.oa.poll_wq);
2288 }
2289
2290 hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD));
2291
2292 return HRTIMER_RESTART;
2293}
2294
Robert Bragg16d98b32016-12-07 21:40:33 +00002295/**
2296 * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
2297 * @dev_priv: i915 device instance
2298 * @stream: An i915 perf stream
2299 * @file: An i915 perf stream file
2300 * @wait: poll() state table
2301 *
2302 * For handling userspace polling on an i915 perf stream, this calls through to
2303 * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
2304 * will be woken for new stream data.
2305 *
2306 * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
2307 * with any non-file-operation driver hooks.
2308 *
2309 * Returns: any poll events that are ready without sleeping
2310 */
Al Viroafc9a422017-07-03 06:39:46 -04002311static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv,
Robert Braggd7965152016-11-07 19:49:52 +00002312 struct i915_perf_stream *stream,
Robert Braggeec688e2016-11-07 19:49:47 +00002313 struct file *file,
2314 poll_table *wait)
2315{
Al Viroafc9a422017-07-03 06:39:46 -04002316 __poll_t events = 0;
Robert Braggeec688e2016-11-07 19:49:47 +00002317
2318 stream->ops->poll_wait(stream, file, wait);
2319
Robert Braggd7965152016-11-07 19:49:52 +00002320 /* Note: we don't explicitly check whether there's something to read
2321 * here since this path may be very hot depending on what else
2322 * userspace is polling, or on the timeout in use. We rely solely on
2323 * the hrtimer/oa_poll_check_timer_cb to notify us when there are
2324 * samples to read.
2325 */
2326 if (dev_priv->perf.oa.pollin)
Linus Torvaldsa9a08842018-02-11 14:34:03 -08002327 events |= EPOLLIN;
Robert Braggeec688e2016-11-07 19:49:47 +00002328
Robert Braggd7965152016-11-07 19:49:52 +00002329 return events;
Robert Braggeec688e2016-11-07 19:49:47 +00002330}
2331
Robert Bragg16d98b32016-12-07 21:40:33 +00002332/**
2333 * i915_perf_poll - call poll_wait() with a suitable wait queue for stream
2334 * @file: An i915 perf stream file
2335 * @wait: poll() state table
2336 *
2337 * For handling userspace polling on an i915 perf stream, this ensures
2338 * poll_wait() gets called with a wait queue that will be woken for new stream
2339 * data.
2340 *
2341 * Note: Implementation deferred to i915_perf_poll_locked()
2342 *
2343 * Returns: any poll events that are ready without sleeping
2344 */
Al Viroafc9a422017-07-03 06:39:46 -04002345static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
Robert Braggeec688e2016-11-07 19:49:47 +00002346{
2347 struct i915_perf_stream *stream = file->private_data;
2348 struct drm_i915_private *dev_priv = stream->dev_priv;
Al Viroafc9a422017-07-03 06:39:46 -04002349 __poll_t ret;
Robert Braggeec688e2016-11-07 19:49:47 +00002350
2351 mutex_lock(&dev_priv->perf.lock);
Robert Braggd7965152016-11-07 19:49:52 +00002352 ret = i915_perf_poll_locked(dev_priv, stream, file, wait);
Robert Braggeec688e2016-11-07 19:49:47 +00002353 mutex_unlock(&dev_priv->perf.lock);
2354
2355 return ret;
2356}
2357
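/*
 * Hypothetical userspace poll sketch (pairing with the read loop above):
 *
 *   struct pollfd pfd = { .fd = stream_fd, .events = POLLIN };
 *
 *   if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *           ... read() is now expected to make progress ...
 *
 * As noted in i915_perf_read(), the wakeup may be a false positive, in
 * which case a non-blocking read() returns -EAGAIN rather than data.
 */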
Robert Bragg16d98b32016-12-07 21:40:33 +00002358/**
2359 * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl
2360 * @stream: A disabled i915 perf stream
2361 *
2362 * [Re]enables the associated capture of data for this stream.
2363 *
2364 * If a stream was previously enabled then there's currently no intention
2365 * to provide userspace any guarantee about the preservation of previously
2366 * buffered data.
2367 */
Robert Braggeec688e2016-11-07 19:49:47 +00002368static void i915_perf_enable_locked(struct i915_perf_stream *stream)
2369{
2370 if (stream->enabled)
2371 return;
2372
2373 /* Allow stream->ops->enable() to refer to this */
2374 stream->enabled = true;
2375
2376 if (stream->ops->enable)
2377 stream->ops->enable(stream);
2378}
2379
Robert Bragg16d98b32016-12-07 21:40:33 +00002380/**
2381 * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl
2382 * @stream: An enabled i915 perf stream
2383 *
2384 * Disables the associated capture of data for this stream.
2385 *
2386 * The intention is that disabling and re-enabling a stream will ideally be
2387 * cheaper than destroying and re-opening a stream with the same configuration,
2388 * though there are no formal guarantees about what state or buffered data
2389 * must be retained between disabling and re-enabling a stream.
2390 *
2391 * Note: while a stream is disabled it's considered an error for userspace
2392 * to attempt to read from the stream (-EIO).
2393 */
Robert Braggeec688e2016-11-07 19:49:47 +00002394static void i915_perf_disable_locked(struct i915_perf_stream *stream)
2395{
2396 if (!stream->enabled)
2397 return;
2398
2399 /* Allow stream->ops->disable() to refer to this */
2400 stream->enabled = false;
2401
2402 if (stream->ops->disable)
2403 stream->ops->disable(stream);
2404}
2405
Robert Bragg16d98b32016-12-07 21:40:33 +00002406/**
2407 * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs
2408 * @stream: An i915 perf stream
2409 * @cmd: the ioctl request
2410 * @arg: the ioctl data
2411 *
2412 * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
2413 * with any non-file-operation driver hooks.
2414 *
2415 * Returns: zero on success or a negative error code. Returns -EINVAL for
2416 * an unknown ioctl request.
2417 */
Robert Braggeec688e2016-11-07 19:49:47 +00002418static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
2419 unsigned int cmd,
2420 unsigned long arg)
2421{
2422 switch (cmd) {
2423 case I915_PERF_IOCTL_ENABLE:
2424 i915_perf_enable_locked(stream);
2425 return 0;
2426 case I915_PERF_IOCTL_DISABLE:
2427 i915_perf_disable_locked(stream);
2428 return 0;
2429 }
2430
2431 return -EINVAL;
2432}
2433
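/*
 * From userspace these map onto plain ioctl()s on the stream fd, e.g.
 * (a sketch, error handling omitted):
 *
 *   ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0);
 *   ... read() / poll() while enabled ...
 *   ioctl(stream_fd, I915_PERF_IOCTL_DISABLE, 0);
 */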
Robert Bragg16d98b32016-12-07 21:40:33 +00002434/**
2435 * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
2436 * @file: An i915 perf stream file
2437 * @cmd: the ioctl request
2438 * @arg: the ioctl data
2439 *
2440 * Implementation deferred to i915_perf_ioctl_locked().
2441 *
2442 * Returns: zero on success or a negative error code. Returns -EINVAL for
2443 * an unknown ioctl request.
2444 */
Robert Braggeec688e2016-11-07 19:49:47 +00002445static long i915_perf_ioctl(struct file *file,
2446 unsigned int cmd,
2447 unsigned long arg)
2448{
2449 struct i915_perf_stream *stream = file->private_data;
2450 struct drm_i915_private *dev_priv = stream->dev_priv;
2451 long ret;
2452
2453 mutex_lock(&dev_priv->perf.lock);
2454 ret = i915_perf_ioctl_locked(stream, cmd, arg);
2455 mutex_unlock(&dev_priv->perf.lock);
2456
2457 return ret;
2458}
2459
Robert Bragg16d98b32016-12-07 21:40:33 +00002460/**
2461 * i915_perf_destroy_locked - destroy an i915 perf stream
2462 * @stream: An i915 perf stream
2463 *
2464 * Frees all resources associated with the given i915 perf @stream, disabling
2465 * any associated data capture in the process.
2466 *
2467 * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
2468 * with any non-file-operation driver hooks.
2469 */
static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
{
	if (stream->enabled)
		i915_perf_disable_locked(stream);

	if (stream->ops->destroy)
		stream->ops->destroy(stream);

	list_del(&stream->link);

	if (stream->ctx)
		i915_gem_context_put(stream->ctx);

	kfree(stream);
}

/**
 * i915_perf_release - handles userspace close() of a stream file
 * @inode: anonymous inode associated with file
 * @file: An i915 perf stream file
 *
 * Cleans up any resources associated with an open i915 perf stream file.
 *
 * NB: close() can't really fail from the userspace point of view.
 *
 * Returns: zero on success or a negative error code.
 */
static int i915_perf_release(struct inode *inode, struct file *file)
{
	struct i915_perf_stream *stream = file->private_data;
	struct drm_i915_private *dev_priv = stream->dev_priv;

	mutex_lock(&dev_priv->perf.lock);
	i915_perf_destroy_locked(stream);
	mutex_unlock(&dev_priv->perf.lock);

	return 0;
}


static const struct file_operations fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.release	= i915_perf_release,
	.poll		= i915_perf_poll,
	.read		= i915_perf_read,
	.unlocked_ioctl	= i915_perf_ioctl,
	/* Our ioctls have no arguments, so it's safe to use the same function
	 * to handle 32-bit compatibility.
	 */
	.compat_ioctl	= i915_perf_ioctl,
};


/**
 * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
 * @dev_priv: i915 device instance
 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
 * @props: individually validated u64 property value pairs
 * @file: drm file
 *
 * See i915_perf_open_ioctl() for interface details.
 *
 * Implements further stream config validation and stream initialization on
 * behalf of i915_perf_open_ioctl() with the &drm_i915_private->perf.lock mutex
 * taken to serialize with any non-file-operation driver hooks.
 *
 * Note: at this point the @props have only been validated in isolation and
 * it's still necessary to validate that the combination of properties makes
 * sense.
 *
 * In the case where userspace is interested in OA unit metrics then further
 * config validation and stream initialization details will be handled by
 * i915_oa_stream_init(). The code here should only validate config state that
 * will be relevant to all stream types / backends.
 *
 * Returns: A newly opened i915 perf stream file descriptor or a negative
 * error code on failure.
 */
static int
i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
			    struct drm_i915_perf_open_param *param,
			    struct perf_open_properties *props,
			    struct drm_file *file)
{
	struct i915_gem_context *specific_ctx = NULL;
	struct i915_perf_stream *stream = NULL;
	unsigned long f_flags = 0;
	bool privileged_op = true;
	int stream_fd;
	int ret;

	if (props->single_context) {
		u32 ctx_handle = props->ctx_handle;
		struct drm_i915_file_private *file_priv = file->driver_priv;

		specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
		if (!specific_ctx) {
			DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n",
				  ctx_handle);
			ret = -ENOENT;
			goto err;
		}
	}

	/*
	 * On Haswell the OA unit supports clock gating off for a specific
	 * context and in this mode there's no visibility of metrics for the
	 * rest of the system, which we consider acceptable for a
	 * non-privileged client.
	 *
	 * For Gen8+ the OA unit no longer supports clock gating off for a
	 * specific context and the kernel can't securely stop the counters
	 * from updating as system-wide / global values. Even though we can
	 * filter reports based on the included context ID we can't block
	 * clients from seeing the raw / global counter values via
	 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
	 * enable the OA unit by default.
	 */
	if (IS_HASWELL(dev_priv) && specific_ctx)
		privileged_op = false;

	/* Similar to perf's kernel.perf_paranoid_cpu sysctl option
	 * we check a dev.i915.perf_stream_paranoid sysctl option
	 * to determine if it's ok to access system wide OA counters
	 * without CAP_SYS_ADMIN privileges.
	 */
	if (privileged_op &&
	    i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
		DRM_DEBUG("Insufficient privileges to open system-wide i915 perf stream\n");
		ret = -EACCES;
		goto err_ctx;
	}

	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
	if (!stream) {
		ret = -ENOMEM;
		goto err_ctx;
	}

	stream->dev_priv = dev_priv;
	stream->ctx = specific_ctx;

	ret = i915_oa_stream_init(stream, param, props);
	if (ret)
		goto err_alloc;

	/* we avoid simply assigning stream->sample_flags = props->sample_flags
	 * to have _stream_init check the combination of sample flags more
	 * thoroughly, but still this is the expected result at this point.
	 */
	if (WARN_ON(stream->sample_flags != props->sample_flags)) {
		ret = -ENODEV;
		goto err_flags;
	}

	list_add(&stream->link, &dev_priv->perf.streams);

	if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
		f_flags |= O_NONBLOCK;

	stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
	if (stream_fd < 0) {
		ret = stream_fd;
		goto err_open;
	}

	if (!(param->flags & I915_PERF_FLAG_DISABLED))
		i915_perf_enable_locked(stream);

	return stream_fd;

err_open:
	list_del(&stream->link);
err_flags:
	if (stream->ops->destroy)
		stream->ops->destroy(stream);
err_alloc:
	kfree(stream);
err_ctx:
	if (specific_ctx)
		i915_gem_context_put(specific_ctx);
err:
	return ret;
}

static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
{
	return div64_u64(1000000000ULL * (2ULL << exponent),
			 1000ULL * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
}
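
/*
 * Worked example (illustrative): the helper above computes
 * 2^(exponent + 1) / cs_timestamp_frequency, in nanoseconds. Assuming a
 * 12.5MHz timestamp frequency (cs_timestamp_frequency_khz == 12500, as on
 * Haswell), exponent 0 gives 2 * 1e9 / 12.5e6 = 160ns between samples, and
 * each increment of the exponent doubles that period.
 */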

/**
 * read_properties_unlocked - validate + copy userspace stream open properties
 * @dev_priv: i915 device instance
 * @uprops: The array of u64 key value pairs given by userspace
 * @n_props: The number of key value pairs expected in @uprops
 * @props: The stream configuration built up while validating properties
 *
 * Note this function only validates properties in isolation; it doesn't
 * validate that the combination of properties makes sense or that all
 * properties necessary for a particular kind of stream have been set.
 *
 * Note that there currently aren't any ordering requirements for properties so
 * we shouldn't validate or assume anything about ordering here. This doesn't
 * rule out defining new properties with ordering requirements in the future.
 *
 * Returns: zero on success or a negative error code.
 */
static int read_properties_unlocked(struct drm_i915_private *dev_priv,
				    u64 __user *uprops,
				    u32 n_props,
				    struct perf_open_properties *props)
{
	u64 __user *uprop = uprops;
	u32 i;

	memset(props, 0, sizeof(struct perf_open_properties));

	if (!n_props) {
		DRM_DEBUG("No i915 perf properties given\n");
		return -EINVAL;
	}

	/* Considering that ID = 0 is reserved and assuming that we don't
	 * (currently) expect any configurations to ever specify duplicate
	 * values for a particular property ID then the last _PROP_MAX value is
	 * one greater than the maximum number of properties we expect to get
	 * from userspace.
	 */
	if (n_props >= DRM_I915_PERF_PROP_MAX) {
		DRM_DEBUG("More i915 perf properties specified than exist\n");
		return -EINVAL;
	}

	for (i = 0; i < n_props; i++) {
		u64 oa_period, oa_freq_hz;
		u64 id, value;
		int ret;

		ret = get_user(id, uprop);
		if (ret)
			return ret;

		ret = get_user(value, uprop + 1);
		if (ret)
			return ret;

		if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
			DRM_DEBUG("Unknown i915 perf property ID\n");
			return -EINVAL;
		}

		switch ((enum drm_i915_perf_property_id)id) {
		case DRM_I915_PERF_PROP_CTX_HANDLE:
			props->single_context = 1;
			props->ctx_handle = value;
			break;
		case DRM_I915_PERF_PROP_SAMPLE_OA:
			if (value)
				props->sample_flags |= SAMPLE_OA_REPORT;
			break;
		case DRM_I915_PERF_PROP_OA_METRICS_SET:
			if (value == 0) {
				DRM_DEBUG("Unknown OA metric set ID\n");
				return -EINVAL;
			}
			props->metrics_set = value;
			break;
		case DRM_I915_PERF_PROP_OA_FORMAT:
			if (value == 0 || value >= I915_OA_FORMAT_MAX) {
				DRM_DEBUG("Out-of-range OA report format %llu\n",
					  value);
				return -EINVAL;
			}
			if (!dev_priv->perf.oa.oa_formats[value].size) {
				DRM_DEBUG("Unsupported OA report format %llu\n",
					  value);
				return -EINVAL;
			}
			props->oa_format = value;
			break;
		case DRM_I915_PERF_PROP_OA_EXPONENT:
			if (value > OA_EXPONENT_MAX) {
				DRM_DEBUG("OA timer exponent too high (> %u)\n",
					  OA_EXPONENT_MAX);
				return -EINVAL;
			}

			/* Theoretically we can program the OA unit to sample
			 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns
			 * for BXT. We don't allow such high sampling
			 * frequencies by default unless root.
			 */

			BUILD_BUG_ON(sizeof(oa_period) != 8);
			oa_period = oa_exponent_to_ns(dev_priv, value);

			/* This check is primarily to ensure that oa_period <=
			 * UINT32_MAX (before passing to do_div which only
			 * accepts a u32 denominator), but we can also skip
			 * checking anything < 1Hz which implicitly can't be
			 * limited via an integer oa_max_sample_rate.
			 */
			if (oa_period <= NSEC_PER_SEC) {
				u64 tmp = NSEC_PER_SEC;
				do_div(tmp, oa_period);
				oa_freq_hz = tmp;
			} else
				oa_freq_hz = 0;

			if (oa_freq_hz > i915_oa_max_sample_rate &&
			    !capable(CAP_SYS_ADMIN)) {
				DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without root privileges\n",
					  i915_oa_max_sample_rate);
				return -EACCES;
			}

			props->oa_periodic = true;
			props->oa_period_exponent = value;
			break;
		case DRM_I915_PERF_PROP_MAX:
			MISSING_CASE(id);
			return -EINVAL;
		}

		uprop += 2;
	}

	return 0;
}
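
/*
 * Illustrative sketch (not part of the driver): the @uprops array parsed
 * above is laid out by userspace as flat (ID, value) u64 pairs. A periodic,
 * system-wide OA stream might be described as follows, where metrics_set_id
 * is a hypothetical placeholder and the format is one of the gen8+ uapi
 * enum values:
 *
 *	uint64_t properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *
 * The matching num_properties is 4 (pairs), not 8 (u64s).
 */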

/**
 * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD
 * @dev: drm device
 * @data: ioctl data copied from userspace (unvalidated)
 * @file: drm file
 *
 * Validates the stream open parameters given by userspace including flags
 * and an array of u64 key, value pair properties.
 *
 * Very little is assumed up front about the nature of the stream being
 * opened (for instance we don't assume it's for periodic OA unit metrics). An
 * i915-perf stream is expected to be a suitable interface for other forms of
 * buffered data written by the GPU besides periodic OA metrics.
 *
 * Note we copy the properties from userspace outside of the i915 perf
 * mutex to avoid an awkward lockdep with mmap_sem.
 *
 * Most of the implementation details are handled by
 * i915_perf_open_ioctl_locked() after taking the &drm_i915_private->perf.lock
 * mutex for serializing with any non-file-operation driver hooks.
 *
 * Returns: A newly opened i915 perf stream file descriptor or negative
 * error code on failure.
 */
int i915_perf_open_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_perf_open_param *param = data;
	struct perf_open_properties props;
	u32 known_open_flags;
	int ret;

	if (!dev_priv->perf.initialized) {
		DRM_DEBUG("i915 perf interface not available for this system\n");
		return -ENOTSUPP;
	}

	known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
			   I915_PERF_FLAG_FD_NONBLOCK |
			   I915_PERF_FLAG_DISABLED;
	if (param->flags & ~known_open_flags) {
		DRM_DEBUG("Unknown drm_i915_perf_open_param flag\n");
		return -EINVAL;
	}

	ret = read_properties_unlocked(dev_priv,
				       u64_to_user_ptr(param->properties_ptr),
				       param->num_properties,
				       &props);
	if (ret)
		return ret;

	mutex_lock(&dev_priv->perf.lock);
	ret = i915_perf_open_ioctl_locked(dev_priv, param, &props, file);
	mutex_unlock(&dev_priv->perf.lock);

	return ret;
}
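
/*
 * Illustrative sketch (not part of the driver): userspace reaches the ioctl
 * above roughly as follows, reusing properties[] from the earlier sketch
 * and assuming drm_fd is an open DRM device node:
 *
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(properties) / 16,
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 * On success the return value is a new stream fd suitable for read(),
 * poll() and the enable/disable ioctls handled earlier in this file.
 */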

/**
 * i915_perf_register - exposes i915-perf to userspace
 * @dev_priv: i915 device instance
 *
 * In particular OA metric sets are advertised under a sysfs metrics/
 * directory allowing userspace to enumerate valid IDs that can be
 * used to open an i915-perf stream.
 */
void i915_perf_register(struct drm_i915_private *dev_priv)
{
	int ret;

	if (!dev_priv->perf.initialized)
		return;

	/* To be sure we're synchronized with an attempted
	 * i915_perf_open_ioctl(); considering that we register after
	 * being exposed to userspace.
	 */
	mutex_lock(&dev_priv->perf.lock);

	dev_priv->perf.metrics_kobj =
		kobject_create_and_add("metrics",
				       &dev_priv->drm.primary->kdev->kobj);
	if (!dev_priv->perf.metrics_kobj)
		goto exit;

	sysfs_attr_init(&dev_priv->perf.oa.test_config.sysfs_metric_id.attr);

	if (INTEL_GEN(dev_priv) >= 11) {
		i915_perf_load_test_config_icl(dev_priv);
	} else if (IS_CANNONLAKE(dev_priv)) {
		i915_perf_load_test_config_cnl(dev_priv);
	} else if (IS_COFFEELAKE(dev_priv)) {
		if (IS_CFL_GT2(dev_priv))
			i915_perf_load_test_config_cflgt2(dev_priv);
		else if (IS_CFL_GT3(dev_priv))
			i915_perf_load_test_config_cflgt3(dev_priv);
	} else if (IS_GEMINILAKE(dev_priv)) {
		i915_perf_load_test_config_glk(dev_priv);
	} else if (IS_KABYLAKE(dev_priv)) {
		if (IS_KBL_GT2(dev_priv))
			i915_perf_load_test_config_kblgt2(dev_priv);
		else if (IS_KBL_GT3(dev_priv))
			i915_perf_load_test_config_kblgt3(dev_priv);
	} else if (IS_BROXTON(dev_priv)) {
		i915_perf_load_test_config_bxt(dev_priv);
	} else if (IS_SKYLAKE(dev_priv)) {
		if (IS_SKL_GT2(dev_priv))
			i915_perf_load_test_config_sklgt2(dev_priv);
		else if (IS_SKL_GT3(dev_priv))
			i915_perf_load_test_config_sklgt3(dev_priv);
		else if (IS_SKL_GT4(dev_priv))
			i915_perf_load_test_config_sklgt4(dev_priv);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		i915_perf_load_test_config_chv(dev_priv);
	} else if (IS_BROADWELL(dev_priv)) {
		i915_perf_load_test_config_bdw(dev_priv);
	} else if (IS_HASWELL(dev_priv)) {
		i915_perf_load_test_config_hsw(dev_priv);
	}

	if (dev_priv->perf.oa.test_config.id == 0)
		goto sysfs_error;

	ret = sysfs_create_group(dev_priv->perf.metrics_kobj,
				 &dev_priv->perf.oa.test_config.sysfs_metric);
	if (ret)
		goto sysfs_error;

	atomic_set(&dev_priv->perf.oa.test_config.ref_count, 1);

	goto exit;

sysfs_error:
	kobject_put(dev_priv->perf.metrics_kobj);
	dev_priv->perf.metrics_kobj = NULL;

exit:
	mutex_unlock(&dev_priv->perf.lock);
}

/**
 * i915_perf_unregister - hide i915-perf from userspace
 * @dev_priv: i915 device instance
 *
 * i915-perf state cleanup is split up into an 'unregister' and
 * 'deinit' phase where the interface is first hidden from
 * userspace by i915_perf_unregister() before cleaning up
 * remaining state in i915_perf_fini().
 */
void i915_perf_unregister(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->perf.metrics_kobj)
		return;

	sysfs_remove_group(dev_priv->perf.metrics_kobj,
			   &dev_priv->perf.oa.test_config.sysfs_metric);

	kobject_put(dev_priv->perf.metrics_kobj);
	dev_priv->perf.metrics_kobj = NULL;
}

static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr)
{
	static const i915_reg_t flex_eu_regs[] = {
		EU_PERF_CNTL0,
		EU_PERF_CNTL1,
		EU_PERF_CNTL2,
		EU_PERF_CNTL3,
		EU_PERF_CNTL4,
		EU_PERF_CNTL5,
		EU_PERF_CNTL6,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
		if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr)
			return true;
	}
	return false;
}

static bool gen7_is_valid_b_counter_addr(struct drm_i915_private *dev_priv, u32 addr)
{
	return (addr >= i915_mmio_reg_offset(OASTARTTRIG1) &&
		addr <= i915_mmio_reg_offset(OASTARTTRIG8)) ||
	       (addr >= i915_mmio_reg_offset(OAREPORTTRIG1) &&
		addr <= i915_mmio_reg_offset(OAREPORTTRIG8)) ||
	       (addr >= i915_mmio_reg_offset(OACEC0_0) &&
		addr <= i915_mmio_reg_offset(OACEC7_1));
}

static bool gen7_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
	return addr == i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) ||
	       (addr >= i915_mmio_reg_offset(MICRO_BP0_0) &&
		addr <= i915_mmio_reg_offset(NOA_WRITE)) ||
	       (addr >= i915_mmio_reg_offset(OA_PERFCNT1_LO) &&
		addr <= i915_mmio_reg_offset(OA_PERFCNT2_HI)) ||
	       (addr >= i915_mmio_reg_offset(OA_PERFMATRIX_LO) &&
		addr <= i915_mmio_reg_offset(OA_PERFMATRIX_HI));
}

static bool gen8_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
	return gen7_is_valid_mux_addr(dev_priv, addr) ||
	       addr == i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) ||
	       (addr >= i915_mmio_reg_offset(RPM_CONFIG0) &&
		addr <= i915_mmio_reg_offset(NOA_CONFIG(8)));
}

static bool gen10_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
	return gen8_is_valid_mux_addr(dev_priv, addr) ||
	       (addr >= i915_mmio_reg_offset(OA_PERFCNT3_LO) &&
		addr <= i915_mmio_reg_offset(OA_PERFCNT4_HI));
}

static bool hsw_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
	return gen7_is_valid_mux_addr(dev_priv, addr) ||
	       (addr >= 0x25100 && addr <= 0x2FF90) ||
	       (addr >= i915_mmio_reg_offset(HSW_MBVID2_NOA0) &&
		addr <= i915_mmio_reg_offset(HSW_MBVID2_NOA9)) ||
	       addr == i915_mmio_reg_offset(HSW_MBVID2_MISR0);
}

static bool chv_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
	return gen7_is_valid_mux_addr(dev_priv, addr) ||
	       (addr >= 0x182300 && addr <= 0x1823A4);
}

static u32 mask_reg_value(u32 reg, u32 val)
{
	/* HALF_SLICE_CHICKEN2 is programmed with the
	 * WaDisableSTUnitPowerOptimization workaround. Make sure the value
	 * programmed by userspace doesn't change this.
	 */
	if (i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) == reg)
		val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);

	/* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
	 * indicated by its name and a bunch of selection fields used by OA
	 * configs.
	 */
	if (i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) == reg)
		val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);

	return val;
}

static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *dev_priv,
					 bool (*is_valid)(struct drm_i915_private *dev_priv, u32 addr),
					 u32 __user *regs,
					 u32 n_regs)
{
	struct i915_oa_reg *oa_regs;
	int err;
	u32 i;

	if (!n_regs)
		return NULL;

	if (!access_ok(regs, n_regs * sizeof(u32) * 2))
		return ERR_PTR(-EFAULT);

	/* No is_valid function means we're not allowing any register to be programmed. */
	GEM_BUG_ON(!is_valid);
	if (!is_valid)
		return ERR_PTR(-EINVAL);

	oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL);
	if (!oa_regs)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < n_regs; i++) {
		u32 addr, value;

		err = get_user(addr, regs);
		if (err)
			goto addr_err;

		if (!is_valid(dev_priv, addr)) {
			DRM_DEBUG("Invalid oa_reg address: %X\n", addr);
			err = -EINVAL;
			goto addr_err;
		}

		err = get_user(value, regs + 1);
		if (err)
			goto addr_err;

		oa_regs[i].addr = _MMIO(addr);
		oa_regs[i].value = mask_reg_value(addr, value);

		regs += 2;
	}

	return oa_regs;

addr_err:
	kfree(oa_regs);
	return ERR_PTR(err);
}
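
/*
 * Illustrative sketch (not part of the driver): like the stream open
 * properties, each register list handed to alloc_oa_regs() is a flat
 * userspace array of (address, value) u32 pairs, with addr0/val0 etc.
 * standing in for offsets accepted by the given is_valid callback:
 *
 *	uint32_t b_counter_regs[] = {
 *		addr0, val0,
 *		addr1, val1,
 *	};
 *
 * n_regs counts pairs, which is why the access_ok() check above sizes the
 * buffer as n_regs * sizeof(u32) * 2.
 */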

static ssize_t show_dynamic_id(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct i915_oa_config *oa_config =
		container_of(attr, typeof(*oa_config), sysfs_metric_id);

	return sprintf(buf, "%d\n", oa_config->id);
}

static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv,
					 struct i915_oa_config *oa_config)
{
	sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
	oa_config->sysfs_metric_id.attr.name = "id";
	oa_config->sysfs_metric_id.attr.mode = S_IRUGO;
	oa_config->sysfs_metric_id.show = show_dynamic_id;
	oa_config->sysfs_metric_id.store = NULL;

	oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr;
	oa_config->attrs[1] = NULL;

	oa_config->sysfs_metric.name = oa_config->uuid;
	oa_config->sysfs_metric.attrs = oa_config->attrs;

	return sysfs_create_group(dev_priv->perf.metrics_kobj,
				  &oa_config->sysfs_metric);
}
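
/*
 * Illustrative note: since the metrics kobject is parented under the
 * primary DRM device in i915_perf_register(), the group created above
 * typically surfaces as (card number varies per system):
 *
 *	/sys/class/drm/card0/metrics/<uuid>/id
 *
 * letting userspace map a config's uuid to the integer id needed for
 * DRM_I915_PERF_PROP_OA_METRICS_SET.
 */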

/**
 * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config
 * @dev: drm device
 * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from
 *        userspace (unvalidated)
 * @file: drm file
 *
 * Validates the submitted OA registers to be saved into a new OA config that
 * can then be used for programming the OA unit and its NOA network.
 *
 * Returns: A newly allocated config number to be used with the perf open ioctl
 * or a negative error code on failure.
 */
int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_perf_oa_config *args = data;
	struct i915_oa_config *oa_config, *tmp;
	int err, id;

	if (!dev_priv->perf.initialized) {
		DRM_DEBUG("i915 perf interface not available for this system\n");
		return -ENOTSUPP;
	}

	if (!dev_priv->perf.metrics_kobj) {
		DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
		return -EINVAL;
	}

	if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
		DRM_DEBUG("Insufficient privileges to add i915 OA config\n");
		return -EACCES;
	}

	if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
	    (!args->boolean_regs_ptr || !args->n_boolean_regs) &&
	    (!args->flex_regs_ptr || !args->n_flex_regs)) {
		DRM_DEBUG("No OA registers given\n");
		return -EINVAL;
	}

	oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
	if (!oa_config) {
		DRM_DEBUG("Failed to allocate memory for the OA config\n");
		return -ENOMEM;
	}

	atomic_set(&oa_config->ref_count, 1);

	if (!uuid_is_valid(args->uuid)) {
		DRM_DEBUG("Invalid uuid format for OA config\n");
		err = -EINVAL;
		goto reg_err;
	}

	/* Last character in oa_config->uuid will be 0 because oa_config is
	 * kzalloc'ed.
	 */
	memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));

	oa_config->mux_regs_len = args->n_mux_regs;
	oa_config->mux_regs =
		alloc_oa_regs(dev_priv,
			      dev_priv->perf.oa.ops.is_valid_mux_reg,
			      u64_to_user_ptr(args->mux_regs_ptr),
			      args->n_mux_regs);

	if (IS_ERR(oa_config->mux_regs)) {
		DRM_DEBUG("Failed to create OA config for mux_regs\n");
		err = PTR_ERR(oa_config->mux_regs);
		goto reg_err;
	}

	oa_config->b_counter_regs_len = args->n_boolean_regs;
	oa_config->b_counter_regs =
		alloc_oa_regs(dev_priv,
			      dev_priv->perf.oa.ops.is_valid_b_counter_reg,
			      u64_to_user_ptr(args->boolean_regs_ptr),
			      args->n_boolean_regs);

	if (IS_ERR(oa_config->b_counter_regs)) {
		DRM_DEBUG("Failed to create OA config for b_counter_regs\n");
		err = PTR_ERR(oa_config->b_counter_regs);
		goto reg_err;
	}

	if (INTEL_GEN(dev_priv) < 8) {
		if (args->n_flex_regs != 0) {
			err = -EINVAL;
			goto reg_err;
		}
	} else {
		oa_config->flex_regs_len = args->n_flex_regs;
		oa_config->flex_regs =
			alloc_oa_regs(dev_priv,
				      dev_priv->perf.oa.ops.is_valid_flex_reg,
				      u64_to_user_ptr(args->flex_regs_ptr),
				      args->n_flex_regs);

		if (IS_ERR(oa_config->flex_regs)) {
			DRM_DEBUG("Failed to create OA config for flex_regs\n");
			err = PTR_ERR(oa_config->flex_regs);
			goto reg_err;
		}
	}

	err = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
	if (err)
		goto reg_err;

	/* We shouldn't have too many configs, so this iteration shouldn't be
	 * too costly.
	 */
	idr_for_each_entry(&dev_priv->perf.metrics_idr, tmp, id) {
		if (!strcmp(tmp->uuid, oa_config->uuid)) {
			DRM_DEBUG("OA config already exists with this uuid\n");
			err = -EADDRINUSE;
			goto sysfs_err;
		}
	}

	err = create_dynamic_oa_sysfs_entry(dev_priv, oa_config);
	if (err) {
		DRM_DEBUG("Failed to create sysfs entry for OA config\n");
		goto sysfs_err;
	}

	/* Config id 0 is invalid, id 1 is reserved for the kernel stored
	 * test config.
	 */
	oa_config->id = idr_alloc(&dev_priv->perf.metrics_idr,
				  oa_config, 2,
				  0, GFP_KERNEL);
	if (oa_config->id < 0) {
		DRM_DEBUG("Failed to allocate id for OA config\n");
		err = oa_config->id;
		goto sysfs_err;
	}

	mutex_unlock(&dev_priv->perf.metrics_lock);

	DRM_DEBUG("Added config %s id=%i\n", oa_config->uuid, oa_config->id);

	return oa_config->id;

sysfs_err:
	mutex_unlock(&dev_priv->perf.metrics_lock);
reg_err:
	put_oa_config(dev_priv, oa_config);
	DRM_DEBUG("Failed to add new OA config\n");
	return err;
}
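
/*
 * Illustrative sketch (not part of the driver): a tool adding a config
 * fills in struct drm_i915_perf_oa_config with the flat register arrays
 * described above; the uuid and b_counter_regs contents here are
 * hypothetical:
 *
 *	struct drm_i915_perf_oa_config config = {
 *		.uuid = "01234567-0123-0123-0123-0123456789ab",
 *		.n_boolean_regs = sizeof(b_counter_regs) / 8,
 *		.boolean_regs_ptr = (uintptr_t)b_counter_regs,
 *	};
 *	int config_id = ioctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
 *
 * The returned id can then be passed as DRM_I915_PERF_PROP_OA_METRICS_SET
 * when opening a stream.
 */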

/**
 * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config
 * @dev: drm device
 * @data: ioctl data (pointer to u64 integer) copied from userspace
 * @file: drm file
 *
 * Configs can be removed while being used; they will stop appearing in sysfs
 * and their content will be freed when the stream using the config is closed.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 *arg = data;
	struct i915_oa_config *oa_config;
	int ret;

	if (!dev_priv->perf.initialized) {
		DRM_DEBUG("i915 perf interface not available for this system\n");
		return -ENOTSUPP;
	}

	if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
		DRM_DEBUG("Insufficient privileges to remove i915 OA config\n");
		return -EACCES;
	}

	ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
	if (ret)
		goto lock_err;

	oa_config = idr_find(&dev_priv->perf.metrics_idr, *arg);
	if (!oa_config) {
		DRM_DEBUG("Failed to remove unknown OA config\n");
		ret = -ENOENT;
		goto config_err;
	}

	GEM_BUG_ON(*arg != oa_config->id);

	sysfs_remove_group(dev_priv->perf.metrics_kobj,
			   &oa_config->sysfs_metric);

	idr_remove(&dev_priv->perf.metrics_idr, *arg);

	DRM_DEBUG("Removed config %s id=%i\n", oa_config->uuid, oa_config->id);

	put_oa_config(dev_priv, oa_config);

config_err:
	mutex_unlock(&dev_priv->perf.metrics_lock);
lock_err:
	return ret;
}
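
/*
 * Illustrative counterpart to the add ioctl (not part of the driver):
 * removal takes a pointer to the u64 id previously returned by
 * DRM_IOCTL_I915_PERF_ADD_CONFIG:
 *
 *	uint64_t id = config_id;
 *	ioctl(drm_fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG, &id);
 */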

static struct ctl_table oa_table[] = {
	{
	 .procname = "perf_stream_paranoid",
	 .data = &i915_perf_stream_paranoid,
	 .maxlen = sizeof(i915_perf_stream_paranoid),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = &zero,
	 .extra2 = &one,
	 },
	{
	 .procname = "oa_max_sample_rate",
	 .data = &i915_oa_max_sample_rate,
	 .maxlen = sizeof(i915_oa_max_sample_rate),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = &zero,
	 .extra2 = &oa_sample_rate_hard_limit,
	 },
	{}
};

static struct ctl_table i915_root[] = {
	{
	 .procname = "i915",
	 .maxlen = 0,
	 .mode = 0555,
	 .child = oa_table,
	 },
	{}
};

static struct ctl_table dev_root[] = {
	{
	 .procname = "dev",
	 .maxlen = 0,
	 .mode = 0555,
	 .child = i915_root,
	 },
	{}
};
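
/*
 * Illustrative note: the dev_root -> i915_root -> oa_table nesting above
 * surfaces these knobs to userspace as:
 *
 *	/proc/sys/dev/i915/perf_stream_paranoid
 *	/proc/sys/dev/i915/oa_max_sample_rate
 *
 * i.e. the dev.i915.perf_stream_paranoid and dev.i915.oa_max_sample_rate
 * sysctls referenced in the DRM_DEBUG messages above.
 */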

/**
 * i915_perf_init - initialize i915-perf state on module load
 * @dev_priv: i915 device instance
 *
 * Initializes i915-perf state without exposing anything to userspace.
 *
 * Note: i915-perf initialization is split into an 'init' and 'register'
 * phase with i915_perf_register() exposing state to userspace.
 */
void i915_perf_init(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv)) {
		dev_priv->perf.oa.ops.is_valid_b_counter_reg =
			gen7_is_valid_b_counter_addr;
		dev_priv->perf.oa.ops.is_valid_mux_reg =
			hsw_is_valid_mux_addr;
		dev_priv->perf.oa.ops.is_valid_flex_reg = NULL;
		dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set;
		dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set;
		dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable;
		dev_priv->perf.oa.ops.oa_disable = gen7_oa_disable;
		dev_priv->perf.oa.ops.read = gen7_oa_read;
		dev_priv->perf.oa.ops.oa_hw_tail_read =
			gen7_oa_hw_tail_read;

		dev_priv->perf.oa.oa_formats = hsw_oa_formats;
	} else if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
		/* Note that although we could theoretically also support the
		 * legacy ringbuffer mode on BDW (and earlier iterations of
		 * this driver, before upstreaming did this) it didn't seem
		 * worth the complexity to maintain now that BDW+ enable
		 * execlist mode by default.
		 */
		dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats;

		dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable;
		dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable;
		dev_priv->perf.oa.ops.read = gen8_oa_read;
		dev_priv->perf.oa.ops.oa_hw_tail_read = gen8_oa_hw_tail_read;

		if (IS_GEN_RANGE(dev_priv, 8, 9)) {
			dev_priv->perf.oa.ops.is_valid_b_counter_reg =
				gen7_is_valid_b_counter_addr;
			dev_priv->perf.oa.ops.is_valid_mux_reg =
				gen8_is_valid_mux_addr;
			dev_priv->perf.oa.ops.is_valid_flex_reg =
				gen8_is_valid_flex_addr;

			if (IS_CHERRYVIEW(dev_priv)) {
				dev_priv->perf.oa.ops.is_valid_mux_reg =
					chv_is_valid_mux_addr;
			}

			dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
			dev_priv->perf.oa.ops.disable_metric_set = gen8_disable_metric_set;

			if (IS_GEN(dev_priv, 8)) {
				dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120;
				dev_priv->perf.oa.ctx_flexeu0_offset = 0x2ce;

				dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<25);
			} else {
				dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
				dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;

				dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
			}
		} else if (IS_GEN_RANGE(dev_priv, 10, 11)) {
			dev_priv->perf.oa.ops.is_valid_b_counter_reg =
				gen7_is_valid_b_counter_addr;
			dev_priv->perf.oa.ops.is_valid_mux_reg =
				gen10_is_valid_mux_addr;
			dev_priv->perf.oa.ops.is_valid_flex_reg =
				gen8_is_valid_flex_addr;

			dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
			dev_priv->perf.oa.ops.disable_metric_set = gen10_disable_metric_set;

			dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
			dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;

			dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
		}
	}

	if (dev_priv->perf.oa.ops.enable_metric_set) {
		hrtimer_init(&dev_priv->perf.oa.poll_check_timer,
			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		dev_priv->perf.oa.poll_check_timer.function = oa_poll_check_timer_cb;
		init_waitqueue_head(&dev_priv->perf.oa.poll_wq);

		INIT_LIST_HEAD(&dev_priv->perf.streams);
		mutex_init(&dev_priv->perf.lock);
		spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock);

		oa_sample_rate_hard_limit = 1000 *
			(RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
		dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);

		mutex_init(&dev_priv->perf.metrics_lock);
		idr_init(&dev_priv->perf.metrics_idr);

		dev_priv->perf.initialized = true;
	}
}
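
/*
 * Worked example (illustrative): with cs_timestamp_frequency_khz == 12500
 * (the 12.5MHz case assumed in the oa_exponent_to_ns() example above),
 * oa_sample_rate_hard_limit in i915_perf_init() resolves to
 * 1000 * (12500 / 2) = 6250000, i.e. a 6.25MHz ceiling for the
 * dev.i915.oa_max_sample_rate sysctl.
 */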

static int destroy_config(int id, void *p, void *data)
{
	struct drm_i915_private *dev_priv = data;
	struct i915_oa_config *oa_config = p;

	put_oa_config(dev_priv, oa_config);

	return 0;
}

/**
 * i915_perf_fini - Counterpart to i915_perf_init()
 * @dev_priv: i915 device instance
 */
void i915_perf_fini(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->perf.initialized)
		return;

	idr_for_each(&dev_priv->perf.metrics_idr, destroy_config, dev_priv);
	idr_destroy(&dev_priv->perf.metrics_idr);

	unregister_sysctl_table(dev_priv->perf.sysctl_header);

	memset(&dev_priv->perf.oa.ops, 0, sizeof(dev_priv->perf.oa.ops));

	dev_priv->perf.initialized = false;
}