blob: a52b7cf1525d77725a5de1f7bf4ed9bf4a0438cd [file] [log] [blame]
Ben Gamari20172632009-02-17 20:08:50 -05001/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 *
27 */
28
Chris Wilsone637d2c2017-03-16 13:19:57 +000029#include <linux/sort.h>
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +010030#include <linux/sched/mm.h>
Daniel Vetterfcd70cd2019-01-17 22:03:34 +010031#include <drm/drm_debugfs.h>
32#include <drm/drm_fourcc.h>
Simon Farnsworth4e5359c2010-09-01 17:47:52 +010033#include "intel_drv.h"
Sagar Arun Kamblea2695742017-11-16 19:02:41 +053034#include "intel_guc_submission.h"
Ben Gamari20172632009-02-17 20:08:50 -050035
Chris Wilson9f588922019-01-16 15:33:04 +000036#include "i915_reset.h"
37
/* Map a debugfs drm_info_node back to the owning i915 device. */
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}
42
/*
 * debugfs: dump static device capabilities — generation, platform, PCH
 * type, device-info flags, runtime info, driver caps and the current
 * module parameters.  @data is the unused debugfs payload.
 */
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

	intel_device_info_dump_flags(info, &p);
	intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
	intel_driver_caps_print(&dev_priv->caps, &p);

	/* Hold the param lock so the modparams are stable while dumped. */
	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915_modparams, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}
Ben Gamari433e12f2009-02-17 20:08:51 -050063
/* Single-character status flag: '*' while the object is active on the GPU. */
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_is_active(obj))
		return '*';

	return ' ';
}
68
Imre Deaka7363de2016-05-12 16:18:52 +030069static char get_pin_flag(struct drm_i915_gem_object *obj)
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010070{
Chris Wilsonbd3d2252017-10-13 21:26:14 +010071 return obj->pin_global ? 'p' : ' ';
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010072}
73
Imre Deaka7363de2016-05-12 16:18:52 +030074static char get_tiling_flag(struct drm_i915_gem_object *obj)
Chris Wilsona6172a82009-02-11 14:26:38 +000075{
Chris Wilson3e510a82016-08-05 10:14:23 +010076 switch (i915_gem_object_get_tiling(obj)) {
Akshay Joshi0206e352011-08-16 15:34:10 -040077 default:
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010078 case I915_TILING_NONE: return ' ';
79 case I915_TILING_X: return 'X';
80 case I915_TILING_Y: return 'Y';
Akshay Joshi0206e352011-08-16 15:34:10 -040081 }
Chris Wilsona6172a82009-02-11 14:26:38 +000082}
83
Imre Deaka7363de2016-05-12 16:18:52 +030084static char get_global_flag(struct drm_i915_gem_object *obj)
Ben Widawsky1d693bc2013-07-31 17:00:00 -070085{
Chris Wilsona65adaf2017-10-09 09:43:57 +010086 return obj->userfault_count ? 'g' : ' ';
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010087}
88
Imre Deaka7363de2016-05-12 16:18:52 +030089static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010090{
Chris Wilsona4f5ea62016-10-28 13:58:35 +010091 return obj->mm.mapping ? 'M' : ' ';
Ben Widawsky1d693bc2013-07-31 17:00:00 -070092}
93
Tvrtko Ursulinca1543b2015-07-01 11:51:10 +010094static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
95{
96 u64 size = 0;
97 struct i915_vma *vma;
98
Chris Wilsone2189dd2017-12-07 21:14:07 +000099 for_each_ggtt_vma(vma, obj) {
100 if (drm_mm_node_allocated(&vma->node))
Tvrtko Ursulinca1543b2015-07-01 11:51:10 +0100101 size += vma->node.size;
102 }
103
104 return size;
105}
106
Matthew Auld7393b7e2017-10-06 23:18:28 +0100107static const char *
108stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
109{
110 size_t x = 0;
111
112 switch (page_sizes) {
113 case 0:
114 return "";
115 case I915_GTT_PAGE_SIZE_4K:
116 return "4K";
117 case I915_GTT_PAGE_SIZE_64K:
118 return "64K";
119 case I915_GTT_PAGE_SIZE_2M:
120 return "2M";
121 default:
122 if (!buf)
123 return "M";
124
125 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
126 x += snprintf(buf + x, len - x, "2M, ");
127 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
128 x += snprintf(buf + x, len - x, "64K, ");
129 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
130 x += snprintf(buf + x, len - x, "4K, ");
131 buf[x-2] = '\0';
132
133 return buf;
134 }
135}
136
/*
 * Emit a one-line (plus per-vma continuation) description of a GEM
 * object: status flags, size, read/write domains, cache level, name,
 * pin count, every allocated vma (with GGTT view details and fence),
 * stolen offset, last write engine and frontbuffer bits.
 *
 * Caller must hold struct_mutex (asserted below) so the vma list and
 * bindings are stable while we walk them.
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	/* First pass: count pinned vmas across all address spaces. */
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_global)
		seq_printf(m, " (global)");
	/* Second pass: describe each vma that has GTT space allocated. */
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_active_request_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
224
Chris Wilsone637d2c2017-03-16 13:19:57 +0000225static int obj_rank_by_stolen(const void *A, const void *B)
Chris Wilson6d2b88852013-08-07 18:30:54 +0100226{
Chris Wilsone637d2c2017-03-16 13:19:57 +0000227 const struct drm_i915_gem_object *a =
228 *(const struct drm_i915_gem_object **)A;
229 const struct drm_i915_gem_object *b =
230 *(const struct drm_i915_gem_object **)B;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100231
Rasmus Villemoes2d05fa12015-09-28 23:08:50 +0200232 if (a->stolen->start < b->stolen->start)
233 return -1;
234 if (a->stolen->start > b->stolen->start)
235 return 1;
236 return 0;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100237}
238
/*
 * debugfs: list all objects backed by stolen memory, sorted by their
 * stolen start offset, followed by total counts/sizes.
 *
 * Takes a bounded snapshot: at most object_count pointers (sampled
 * before allocation) are collected from the bound/unbound lists under
 * the mm.obj_lock spinlock, then described after dropping the lock.
 * Lock order: struct_mutex outside, mm.obj_lock inside.
 */
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);

	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, " ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}
301
/*
 * Aggregated object statistics for one owner (a client file, a context,
 * or a kernel pool), filled in by per_file_stats().
 */
struct file_stats {
	struct i915_address_space *vm;	/* if set, only count ppGTT vmas in this vm */
	unsigned long count;		/* objects visited */
	u64 total, unbound;		/* total size; size of objects with no binding */
	u64 global, shared;		/* size in global GTT; size shared via name/dma-buf */
	u64 active, inactive;		/* bound size, split by GPU activity */
	u64 closed;			/* size held in closed vmas */
};
310
/*
 * idr_for_each() callback: fold one object's sizes into a file_stats.
 * @id is the idr handle (unused), @ptr the object, @data the stats.
 *
 * Only vmas with allocated GTT space contribute to the active/inactive/
 * closed totals; non-GGTT vmas are skipped unless they belong to
 * stats->vm.  Always returns 0 so the idr walk continues.
 */
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			/* Only count ppGTT vmas in the vm being reported. */
			if (vma->vm != stats->vm)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;

		if (i915_vma_is_closed(vma))
			stats->closed += vma->node.size;
	}

	return 0;
}
348
/*
 * Emit one summary line for a file_stats, skipping owners with no
 * objects.  A macro (not a function) so it works with a by-value
 * struct and keeps the seq_printf format in one place.
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound, \
			   stats.closed); \
} while (0)
Brad Volkin493018d2014-12-11 12:13:08 -0800362
/*
 * Summarise every object held in the per-engine batch-buffer pools
 * as a single "[k]batch pool" stats line.
 */
static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	struct file_stats stats = {};
	enum intel_engine_id id;
	int j;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}
383
/*
 * Per-context memory accounting.  Kernel-owned state (context state and
 * ring buffers of every active engine) is folded into "[k]contexts";
 * each client-owned context additionally gets its own line, labelled
 * "comm/handle", built by walking the owning file's object idr.
 */
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *i915)
{
	struct file_stats kstats = {};
	struct i915_gem_context *ctx;

	list_for_each_entry(ctx, &i915->contexts.list, link) {
		struct intel_context *ce;

		list_for_each_entry(ce, &ctx->active_engines, active_link) {
			if (ce->state)
				per_file_stats(0, ce->state->obj, &kstats);
			if (ce->ring)
				per_file_stats(0, ce->ring->vma->obj, &kstats);
		}

		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
			/* Restrict ppGTT accounting to this context's vm. */
			struct file_stats stats = { .vm = &ctx->ppgtt->vm, };
			struct drm_file *file = ctx->file_priv->file;
			struct task_struct *task;
			char name[80];

			spin_lock(&file->table_lock);
			idr_for_each(&file->object_idr, per_file_stats, &stats);
			spin_unlock(&file->table_lock);

			/* pid_task() requires RCU; build the label inside
			 * the read section while the task is stable. */
			rcu_read_lock();
			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
			snprintf(name, sizeof(name), "%s/%d",
				 task ? task->comm : "<unknown>",
				 ctx->user_handle);
			rcu_read_unlock();

			print_file_stats(m, name, stats);
		}
	}

	print_file_stats(m, "[k]contexts", kstats);
}
423
/*
 * debugfs: overall GEM object accounting.  Walks the unbound and bound
 * lists under mm.obj_lock, tallying counts and sizes (purgeable, mapped,
 * huge-paged, display-pinned), then prints GGTT totals and, under
 * struct_mutex, the batch-pool and per-context breakdowns.
 */
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
	struct drm_i915_gem_object *obj;
	unsigned int page_sizes = 0;
	char buf[80];
	int ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	huge_size = huge_count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	/* Pass 1: objects with no GTT binding. */
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	/* Pass 2: bound objects; reuse size/count, keep the other tallies. */
	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->pin_global) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
		   huge_count,
		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
		   huge_size);
	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%pa] gtt total\n",
		   ggtt->vm.total, &ggtt->mappable_end);
	seq_printf(m, "Supported page sizes: %s\n",
		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
					buf, sizeof(buf)));

	seq_putc(m, '\n');

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	print_batch_pool_stats(m, dev_priv);
	print_context_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
527
Damien Lespiauaee56cf2013-06-24 22:59:49 +0100528static int i915_gem_gtt_info(struct seq_file *m, void *data)
Chris Wilson08c18322011-01-10 00:00:24 +0000529{
Damien Lespiau9f25d002014-05-13 15:30:28 +0100530 struct drm_info_node *node = m->private;
David Weinehall36cdd012016-08-22 13:59:31 +0300531 struct drm_i915_private *dev_priv = node_to_i915(node);
532 struct drm_device *dev = &dev_priv->drm;
Chris Wilsonf2123812017-10-16 12:40:37 +0100533 struct drm_i915_gem_object **objects;
Chris Wilson08c18322011-01-10 00:00:24 +0000534 struct drm_i915_gem_object *obj;
Mika Kuoppalac44ef602015-06-25 18:35:05 +0300535 u64 total_obj_size, total_gtt_size;
Chris Wilsonf2123812017-10-16 12:40:37 +0100536 unsigned long nobject, n;
Chris Wilson08c18322011-01-10 00:00:24 +0000537 int count, ret;
538
Chris Wilsonf2123812017-10-16 12:40:37 +0100539 nobject = READ_ONCE(dev_priv->mm.object_count);
540 objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
541 if (!objects)
542 return -ENOMEM;
543
Chris Wilson08c18322011-01-10 00:00:24 +0000544 ret = mutex_lock_interruptible(&dev->struct_mutex);
545 if (ret)
546 return ret;
547
Chris Wilsonf2123812017-10-16 12:40:37 +0100548 count = 0;
549 spin_lock(&dev_priv->mm.obj_lock);
550 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
551 objects[count++] = obj;
552 if (count == nobject)
553 break;
554 }
555 spin_unlock(&dev_priv->mm.obj_lock);
556
557 total_obj_size = total_gtt_size = 0;
558 for (n = 0; n < count; n++) {
559 obj = objects[n];
560
Damien Lespiau267f0c92013-06-24 22:59:48 +0100561 seq_puts(m, " ");
Chris Wilson08c18322011-01-10 00:00:24 +0000562 describe_obj(m, obj);
Damien Lespiau267f0c92013-06-24 22:59:48 +0100563 seq_putc(m, '\n');
Chris Wilson08c18322011-01-10 00:00:24 +0000564 total_obj_size += obj->base.size;
Tvrtko Ursulinca1543b2015-07-01 11:51:10 +0100565 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
Chris Wilson08c18322011-01-10 00:00:24 +0000566 }
567
568 mutex_unlock(&dev->struct_mutex);
569
Mika Kuoppalac44ef602015-06-25 18:35:05 +0300570 seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
Chris Wilson08c18322011-01-10 00:00:24 +0000571 count, total_obj_size, total_gtt_size);
Chris Wilsonf2123812017-10-16 12:40:37 +0100572 kvfree(objects);
Chris Wilson08c18322011-01-10 00:00:24 +0000573
574 return 0;
575}
576
/*
 * debugfs: dump the contents of every engine's batch-buffer pool —
 * per-bucket object counts, a description of each cached object, and a
 * grand total.  Holds struct_mutex for the whole walk (required by
 * describe_obj() and to keep the pool lists stable).
 */
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, " ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
621
/*
 * Print the gen8+ display-engine interrupt registers: per-pipe
 * IMR/IIR/IER (only for pipes whose power well is up — reading a
 * powered-down pipe's registers is skipped, not forced on), then the
 * port, misc and PCU interrupt registers.
 */
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		/* Only take a wakeref if the pipe is already powered. */
		wakeref = intel_display_power_get_if_enabled(dev_priv,
							     power_domain);
		if (!wakeref) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain, wakeref);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}
673
Ben Gamari20172632009-02-17 20:08:50 -0500674static int i915_interrupt_info(struct seq_file *m, void *data)
675{
David Weinehall36cdd012016-08-22 13:59:31 +0300676 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000677 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +0530678 enum intel_engine_id id;
Chris Wilsona0371212019-01-14 14:21:14 +0000679 intel_wakeref_t wakeref;
Chris Wilson4bb05042016-09-03 07:53:43 +0100680 int i, pipe;
Chris Wilsonde227ef2010-07-03 07:58:38 +0100681
Chris Wilsona0371212019-01-14 14:21:14 +0000682 wakeref = intel_runtime_pm_get(dev_priv);
Ben Gamari20172632009-02-17 20:08:50 -0500683
David Weinehall36cdd012016-08-22 13:59:31 +0300684 if (IS_CHERRYVIEW(dev_priv)) {
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000685 intel_wakeref_t pref;
686
Ville Syrjälä74e1ca82014-04-09 13:28:09 +0300687 seq_printf(m, "Master Interrupt Control:\t%08x\n",
688 I915_READ(GEN8_MASTER_IRQ));
689
690 seq_printf(m, "Display IER:\t%08x\n",
691 I915_READ(VLV_IER));
692 seq_printf(m, "Display IIR:\t%08x\n",
693 I915_READ(VLV_IIR));
694 seq_printf(m, "Display IIR_RW:\t%08x\n",
695 I915_READ(VLV_IIR_RW));
696 seq_printf(m, "Display IMR:\t%08x\n",
697 I915_READ(VLV_IMR));
Chris Wilson9c870d02016-10-24 13:42:15 +0100698 for_each_pipe(dev_priv, pipe) {
699 enum intel_display_power_domain power_domain;
700
701 power_domain = POWER_DOMAIN_PIPE(pipe);
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000702 pref = intel_display_power_get_if_enabled(dev_priv,
703 power_domain);
704 if (!pref) {
Chris Wilson9c870d02016-10-24 13:42:15 +0100705 seq_printf(m, "Pipe %c power disabled\n",
706 pipe_name(pipe));
707 continue;
708 }
709
Ville Syrjälä74e1ca82014-04-09 13:28:09 +0300710 seq_printf(m, "Pipe %c stat:\t%08x\n",
711 pipe_name(pipe),
712 I915_READ(PIPESTAT(pipe)));
713
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000714 intel_display_power_put(dev_priv, power_domain, pref);
Chris Wilson9c870d02016-10-24 13:42:15 +0100715 }
716
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000717 pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
Ville Syrjälä74e1ca82014-04-09 13:28:09 +0300718 seq_printf(m, "Port hotplug:\t%08x\n",
719 I915_READ(PORT_HOTPLUG_EN));
720 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
721 I915_READ(VLV_DPFLIPSTAT));
722 seq_printf(m, "DPINVGTT:\t%08x\n",
723 I915_READ(DPINVGTT));
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000724 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
Ville Syrjälä74e1ca82014-04-09 13:28:09 +0300725
726 for (i = 0; i < 4; i++) {
727 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
728 i, I915_READ(GEN8_GT_IMR(i)));
729 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
730 i, I915_READ(GEN8_GT_IIR(i)));
731 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
732 i, I915_READ(GEN8_GT_IER(i)));
733 }
734
735 seq_printf(m, "PCU interrupt mask:\t%08x\n",
736 I915_READ(GEN8_PCU_IMR));
737 seq_printf(m, "PCU interrupt identity:\t%08x\n",
738 I915_READ(GEN8_PCU_IIR));
739 seq_printf(m, "PCU interrupt enable:\t%08x\n",
740 I915_READ(GEN8_PCU_IER));
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200741 } else if (INTEL_GEN(dev_priv) >= 11) {
742 seq_printf(m, "Master Interrupt Control: %08x\n",
743 I915_READ(GEN11_GFX_MSTR_IRQ));
744
745 seq_printf(m, "Render/Copy Intr Enable: %08x\n",
746 I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
747 seq_printf(m, "VCS/VECS Intr Enable: %08x\n",
748 I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
749 seq_printf(m, "GUC/SG Intr Enable:\t %08x\n",
750 I915_READ(GEN11_GUC_SG_INTR_ENABLE));
751 seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
752 I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
753 seq_printf(m, "Crypto Intr Enable:\t %08x\n",
754 I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
755 seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n",
756 I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
757
758 seq_printf(m, "Display Interrupt Control:\t%08x\n",
759 I915_READ(GEN11_DISPLAY_INT_CTL));
760
761 gen8_display_interrupt_info(m);
David Weinehall36cdd012016-08-22 13:59:31 +0300762 } else if (INTEL_GEN(dev_priv) >= 8) {
Ben Widawskya123f152013-11-02 21:07:10 -0700763 seq_printf(m, "Master Interrupt Control:\t%08x\n",
764 I915_READ(GEN8_MASTER_IRQ));
765
766 for (i = 0; i < 4; i++) {
767 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
768 i, I915_READ(GEN8_GT_IMR(i)));
769 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
770 i, I915_READ(GEN8_GT_IIR(i)));
771 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
772 i, I915_READ(GEN8_GT_IER(i)));
773 }
774
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200775 gen8_display_interrupt_info(m);
David Weinehall36cdd012016-08-22 13:59:31 +0300776 } else if (IS_VALLEYVIEW(dev_priv)) {
Jesse Barnes7e231dbe2012-03-28 13:39:38 -0700777 seq_printf(m, "Display IER:\t%08x\n",
778 I915_READ(VLV_IER));
779 seq_printf(m, "Display IIR:\t%08x\n",
780 I915_READ(VLV_IIR));
781 seq_printf(m, "Display IIR_RW:\t%08x\n",
782 I915_READ(VLV_IIR_RW));
783 seq_printf(m, "Display IMR:\t%08x\n",
784 I915_READ(VLV_IMR));
Chris Wilson4f4631a2017-02-10 13:36:32 +0000785 for_each_pipe(dev_priv, pipe) {
786 enum intel_display_power_domain power_domain;
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000787 intel_wakeref_t pref;
Chris Wilson4f4631a2017-02-10 13:36:32 +0000788
789 power_domain = POWER_DOMAIN_PIPE(pipe);
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000790 pref = intel_display_power_get_if_enabled(dev_priv,
791 power_domain);
792 if (!pref) {
Chris Wilson4f4631a2017-02-10 13:36:32 +0000793 seq_printf(m, "Pipe %c power disabled\n",
794 pipe_name(pipe));
795 continue;
796 }
797
Jesse Barnes7e231dbe2012-03-28 13:39:38 -0700798 seq_printf(m, "Pipe %c stat:\t%08x\n",
799 pipe_name(pipe),
800 I915_READ(PIPESTAT(pipe)));
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000801 intel_display_power_put(dev_priv, power_domain, pref);
Chris Wilson4f4631a2017-02-10 13:36:32 +0000802 }
Jesse Barnes7e231dbe2012-03-28 13:39:38 -0700803
804 seq_printf(m, "Master IER:\t%08x\n",
805 I915_READ(VLV_MASTER_IER));
806
807 seq_printf(m, "Render IER:\t%08x\n",
808 I915_READ(GTIER));
809 seq_printf(m, "Render IIR:\t%08x\n",
810 I915_READ(GTIIR));
811 seq_printf(m, "Render IMR:\t%08x\n",
812 I915_READ(GTIMR));
813
814 seq_printf(m, "PM IER:\t\t%08x\n",
815 I915_READ(GEN6_PMIER));
816 seq_printf(m, "PM IIR:\t\t%08x\n",
817 I915_READ(GEN6_PMIIR));
818 seq_printf(m, "PM IMR:\t\t%08x\n",
819 I915_READ(GEN6_PMIMR));
820
821 seq_printf(m, "Port hotplug:\t%08x\n",
822 I915_READ(PORT_HOTPLUG_EN));
823 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
824 I915_READ(VLV_DPFLIPSTAT));
825 seq_printf(m, "DPINVGTT:\t%08x\n",
826 I915_READ(DPINVGTT));
827
David Weinehall36cdd012016-08-22 13:59:31 +0300828 } else if (!HAS_PCH_SPLIT(dev_priv)) {
Zhenyu Wang5f6a1692009-08-10 21:37:24 +0800829 seq_printf(m, "Interrupt enable: %08x\n",
830 I915_READ(IER));
831 seq_printf(m, "Interrupt identity: %08x\n",
832 I915_READ(IIR));
833 seq_printf(m, "Interrupt mask: %08x\n",
834 I915_READ(IMR));
Damien Lespiau055e3932014-08-18 13:49:10 +0100835 for_each_pipe(dev_priv, pipe)
Jesse Barnes9db4a9c2011-02-07 12:26:52 -0800836 seq_printf(m, "Pipe %c stat: %08x\n",
837 pipe_name(pipe),
838 I915_READ(PIPESTAT(pipe)));
Zhenyu Wang5f6a1692009-08-10 21:37:24 +0800839 } else {
840 seq_printf(m, "North Display Interrupt enable: %08x\n",
841 I915_READ(DEIER));
842 seq_printf(m, "North Display Interrupt identity: %08x\n",
843 I915_READ(DEIIR));
844 seq_printf(m, "North Display Interrupt mask: %08x\n",
845 I915_READ(DEIMR));
846 seq_printf(m, "South Display Interrupt enable: %08x\n",
847 I915_READ(SDEIER));
848 seq_printf(m, "South Display Interrupt identity: %08x\n",
849 I915_READ(SDEIIR));
850 seq_printf(m, "South Display Interrupt mask: %08x\n",
851 I915_READ(SDEIMR));
852 seq_printf(m, "Graphics Interrupt enable: %08x\n",
853 I915_READ(GTIER));
854 seq_printf(m, "Graphics Interrupt identity: %08x\n",
855 I915_READ(GTIIR));
856 seq_printf(m, "Graphics Interrupt mask: %08x\n",
857 I915_READ(GTIMR));
858 }
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200859
860 if (INTEL_GEN(dev_priv) >= 11) {
861 seq_printf(m, "RCS Intr Mask:\t %08x\n",
862 I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
863 seq_printf(m, "BCS Intr Mask:\t %08x\n",
864 I915_READ(GEN11_BCS_RSVD_INTR_MASK));
865 seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
866 I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
867 seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
868 I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
869 seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
870 I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
871 seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
872 I915_READ(GEN11_GUC_SG_INTR_MASK));
873 seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
874 I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
875 seq_printf(m, "Crypto Intr Mask:\t %08x\n",
876 I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
877 seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
878 I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
879
880 } else if (INTEL_GEN(dev_priv) >= 6) {
Chris Wilsond5acadf2017-12-09 10:44:18 +0000881 for_each_engine(engine, dev_priv, id) {
Chris Wilsona2c7f6f2012-09-01 20:51:22 +0100882 seq_printf(m,
883 "Graphics Interrupt mask (%s): %08x\n",
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000884 engine->name, I915_READ_IMR(engine));
Chris Wilson9862e602011-01-04 22:22:17 +0000885 }
Chris Wilson9862e602011-01-04 22:22:17 +0000886 }
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200887
Chris Wilsona0371212019-01-14 14:21:14 +0000888 intel_runtime_pm_put(dev_priv, wakeref);
Chris Wilsonde227ef2010-07-03 07:58:38 +0100889
Ben Gamari20172632009-02-17 20:08:50 -0500890 return 0;
891}
892
Chris Wilsona6172a82009-02-11 14:26:38 +0000893static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
894{
David Weinehall36cdd012016-08-22 13:59:31 +0300895 struct drm_i915_private *dev_priv = node_to_i915(m->private);
896 struct drm_device *dev = &dev_priv->drm;
Chris Wilsonde227ef2010-07-03 07:58:38 +0100897 int i, ret;
898
899 ret = mutex_lock_interruptible(&dev->struct_mutex);
900 if (ret)
901 return ret;
Chris Wilsona6172a82009-02-11 14:26:38 +0000902
Chris Wilsona6172a82009-02-11 14:26:38 +0000903 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
904 for (i = 0; i < dev_priv->num_fence_regs; i++) {
Chris Wilson49ef5292016-08-18 17:17:00 +0100905 struct i915_vma *vma = dev_priv->fence_regs[i].vma;
Chris Wilsona6172a82009-02-11 14:26:38 +0000906
Chris Wilson6c085a72012-08-20 11:40:46 +0200907 seq_printf(m, "Fence %d, pin count = %d, object = ",
908 i, dev_priv->fence_regs[i].pin_count);
Chris Wilson49ef5292016-08-18 17:17:00 +0100909 if (!vma)
Damien Lespiau267f0c92013-06-24 22:59:48 +0100910 seq_puts(m, "unused");
Chris Wilsonc2c347a92010-10-27 15:11:53 +0100911 else
Chris Wilson49ef5292016-08-18 17:17:00 +0100912 describe_obj(m, vma->obj);
Damien Lespiau267f0c92013-06-24 22:59:48 +0100913 seq_putc(m, '\n');
Chris Wilsona6172a82009-02-11 14:26:38 +0000914 }
915
Chris Wilson05394f32010-11-08 19:18:58 +0000916 mutex_unlock(&dev->struct_mutex);
Chris Wilsona6172a82009-02-11 14:26:38 +0000917 return 0;
918}
919
Chris Wilson98a2f412016-10-12 10:05:18 +0100920#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000921static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
922 size_t count, loff_t *pos)
923{
Chris Wilson0e390372018-11-23 13:23:25 +0000924 struct i915_gpu_state *error;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000925 ssize_t ret;
Chris Wilson0e390372018-11-23 13:23:25 +0000926 void *buf;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000927
Chris Wilson0e390372018-11-23 13:23:25 +0000928 error = file->private_data;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000929 if (!error)
930 return 0;
931
Chris Wilson0e390372018-11-23 13:23:25 +0000932 /* Bounce buffer required because of kernfs __user API convenience. */
933 buf = kmalloc(count, GFP_KERNEL);
934 if (!buf)
935 return -ENOMEM;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000936
Chris Wilson0e390372018-11-23 13:23:25 +0000937 ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
938 if (ret <= 0)
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000939 goto out;
940
Chris Wilson0e390372018-11-23 13:23:25 +0000941 if (!copy_to_user(ubuf, buf, ret))
942 *pos += ret;
943 else
944 ret = -EFAULT;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000945
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000946out:
Chris Wilson0e390372018-11-23 13:23:25 +0000947 kfree(buf);
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000948 return ret;
949}
950
951static int gpu_state_release(struct inode *inode, struct file *file)
952{
953 i915_gpu_state_put(file->private_data);
954 return 0;
955}
956
957static int i915_gpu_info_open(struct inode *inode, struct file *file)
958{
Chris Wilson090e5fe2017-03-28 14:14:07 +0100959 struct drm_i915_private *i915 = inode->i_private;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000960 struct i915_gpu_state *gpu;
Chris Wilsona0371212019-01-14 14:21:14 +0000961 intel_wakeref_t wakeref;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000962
Chris Wilsond4225a52019-01-14 14:21:23 +0000963 gpu = NULL;
964 with_intel_runtime_pm(i915, wakeref)
965 gpu = i915_capture_gpu_state(i915);
Chris Wilsone6154e42018-12-07 11:05:54 +0000966 if (IS_ERR(gpu))
967 return PTR_ERR(gpu);
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000968
969 file->private_data = gpu;
970 return 0;
971}
972
973static const struct file_operations i915_gpu_info_fops = {
974 .owner = THIS_MODULE,
975 .open = i915_gpu_info_open,
976 .read = gpu_state_read,
977 .llseek = default_llseek,
978 .release = gpu_state_release,
979};
Chris Wilson98a2f412016-10-12 10:05:18 +0100980
Daniel Vetterd5442302012-04-27 15:17:40 +0200981static ssize_t
982i915_error_state_write(struct file *filp,
983 const char __user *ubuf,
984 size_t cnt,
985 loff_t *ppos)
986{
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000987 struct i915_gpu_state *error = filp->private_data;
988
989 if (!error)
990 return 0;
Daniel Vetterd5442302012-04-27 15:17:40 +0200991
992 DRM_DEBUG_DRIVER("Resetting error state\n");
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000993 i915_reset_error_state(error->i915);
Daniel Vetterd5442302012-04-27 15:17:40 +0200994
995 return cnt;
996}
997
998static int i915_error_state_open(struct inode *inode, struct file *file)
999{
Chris Wilsone6154e42018-12-07 11:05:54 +00001000 struct i915_gpu_state *error;
1001
1002 error = i915_first_error_state(inode->i_private);
1003 if (IS_ERR(error))
1004 return PTR_ERR(error);
1005
1006 file->private_data = error;
Mika Kuoppalaedc3d882013-05-23 13:55:35 +03001007 return 0;
Daniel Vetterd5442302012-04-27 15:17:40 +02001008}
1009
Daniel Vetterd5442302012-04-27 15:17:40 +02001010static const struct file_operations i915_error_state_fops = {
1011 .owner = THIS_MODULE,
1012 .open = i915_error_state_open,
Chris Wilson5a4c6f12017-02-14 16:46:11 +00001013 .read = gpu_state_read,
Daniel Vetterd5442302012-04-27 15:17:40 +02001014 .write = i915_error_state_write,
1015 .llseek = default_llseek,
Chris Wilson5a4c6f12017-02-14 16:46:11 +00001016 .release = gpu_state_release,
Daniel Vetterd5442302012-04-27 15:17:40 +02001017};
Chris Wilson98a2f412016-10-12 10:05:18 +01001018#endif
1019
Deepak Sadb4bd12014-03-31 11:30:02 +05301020static int i915_frequency_info(struct seq_file *m, void *unused)
Jesse Barnesf97108d2010-01-29 11:27:07 -08001021{
David Weinehall36cdd012016-08-22 13:59:31 +03001022 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001023 struct intel_rps *rps = &dev_priv->gt_pm.rps;
Chris Wilsona0371212019-01-14 14:21:14 +00001024 intel_wakeref_t wakeref;
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02001025 int ret = 0;
1026
Chris Wilsona0371212019-01-14 14:21:14 +00001027 wakeref = intel_runtime_pm_get(dev_priv);
Jesse Barnesf97108d2010-01-29 11:27:07 -08001028
Lucas De Marchicf819ef2018-12-12 10:10:43 -08001029 if (IS_GEN(dev_priv, 5)) {
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001030 u16 rgvswctl = I915_READ16(MEMSWCTL);
1031 u16 rgvstat = I915_READ16(MEMSTAT_ILK);
1032
1033 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
1034 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
1035 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
1036 MEMSTAT_VID_SHIFT);
1037 seq_printf(m, "Current P-state: %d\n",
1038 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
David Weinehall36cdd012016-08-22 13:59:31 +03001039 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
Sagar Arun Kamble0d6fc922017-10-10 22:30:02 +01001040 u32 rpmodectl, freq_sts;
Wayne Boyer666a4532015-12-09 12:29:35 -08001041
Sagar Arun Kamble9f817502017-10-10 22:30:05 +01001042 mutex_lock(&dev_priv->pcu_lock);
Sagar Arun Kamble0d6fc922017-10-10 22:30:02 +01001043
1044 rpmodectl = I915_READ(GEN6_RP_CONTROL);
1045 seq_printf(m, "Video Turbo Mode: %s\n",
1046 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1047 seq_printf(m, "HW control enabled: %s\n",
1048 yesno(rpmodectl & GEN6_RP_ENABLE));
1049 seq_printf(m, "SW control enabled: %s\n",
1050 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1051 GEN6_RP_MEDIA_SW_MODE));
1052
Wayne Boyer666a4532015-12-09 12:29:35 -08001053 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
1054 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
1055 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
1056
1057 seq_printf(m, "actual GPU freq: %d MHz\n",
1058 intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
1059
1060 seq_printf(m, "current GPU freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001061 intel_gpu_freq(dev_priv, rps->cur_freq));
Wayne Boyer666a4532015-12-09 12:29:35 -08001062
1063 seq_printf(m, "max GPU freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001064 intel_gpu_freq(dev_priv, rps->max_freq));
Wayne Boyer666a4532015-12-09 12:29:35 -08001065
1066 seq_printf(m, "min GPU freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001067 intel_gpu_freq(dev_priv, rps->min_freq));
Wayne Boyer666a4532015-12-09 12:29:35 -08001068
1069 seq_printf(m, "idle GPU freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001070 intel_gpu_freq(dev_priv, rps->idle_freq));
Wayne Boyer666a4532015-12-09 12:29:35 -08001071
1072 seq_printf(m,
1073 "efficient (RPe) frequency: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001074 intel_gpu_freq(dev_priv, rps->efficient_freq));
Sagar Arun Kamble9f817502017-10-10 22:30:05 +01001075 mutex_unlock(&dev_priv->pcu_lock);
David Weinehall36cdd012016-08-22 13:59:31 +03001076 } else if (INTEL_GEN(dev_priv) >= 6) {
Bob Paauwe35040562015-06-25 14:54:07 -07001077 u32 rp_state_limits;
1078 u32 gt_perf_status;
1079 u32 rp_state_cap;
Chris Wilson0d8f9492014-03-27 09:06:14 +00001080 u32 rpmodectl, rpinclimit, rpdeclimit;
Chris Wilson8e8c06c2013-08-26 19:51:01 -03001081 u32 rpstat, cagf, reqf;
Jesse Barnesccab5c82011-01-18 15:49:25 -08001082 u32 rpupei, rpcurup, rpprevup;
1083 u32 rpdownei, rpcurdown, rpprevdown;
Paulo Zanoni9dd3c602014-08-01 18:14:48 -03001084 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001085 int max_freq;
1086
Bob Paauwe35040562015-06-25 14:54:07 -07001087 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02001088 if (IS_GEN9_LP(dev_priv)) {
Bob Paauwe35040562015-06-25 14:54:07 -07001089 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
1090 gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
1091 } else {
1092 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
1093 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
1094 }
1095
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001096 /* RPSTAT1 is in the GT power well */
Mika Kuoppala59bad942015-01-16 11:34:40 +02001097 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001098
Chris Wilson8e8c06c2013-08-26 19:51:01 -03001099 reqf = I915_READ(GEN6_RPNSWREQ);
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001100 if (INTEL_GEN(dev_priv) >= 9)
Akash Goel60260a52015-03-06 11:07:21 +05301101 reqf >>= 23;
1102 else {
1103 reqf &= ~GEN6_TURBO_DISABLE;
David Weinehall36cdd012016-08-22 13:59:31 +03001104 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
Akash Goel60260a52015-03-06 11:07:21 +05301105 reqf >>= 24;
1106 else
1107 reqf >>= 25;
1108 }
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001109 reqf = intel_gpu_freq(dev_priv, reqf);
Chris Wilson8e8c06c2013-08-26 19:51:01 -03001110
Chris Wilson0d8f9492014-03-27 09:06:14 +00001111 rpmodectl = I915_READ(GEN6_RP_CONTROL);
1112 rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
1113 rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
1114
Jesse Barnesccab5c82011-01-18 15:49:25 -08001115 rpstat = I915_READ(GEN6_RPSTAT1);
Akash Goeld6cda9c2016-04-23 00:05:46 +05301116 rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
1117 rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
1118 rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
1119 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
1120 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
1121 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
Tvrtko Ursulinc84b2702017-11-21 18:18:44 +00001122 cagf = intel_gpu_freq(dev_priv,
1123 intel_get_cagf(dev_priv, rpstat));
Jesse Barnesccab5c82011-01-18 15:49:25 -08001124
Mika Kuoppala59bad942015-01-16 11:34:40 +02001125 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
Ben Widawskyd1ebd8162011-04-25 20:11:50 +01001126
Oscar Mateo6b7a6a72018-05-10 14:59:55 -07001127 if (INTEL_GEN(dev_priv) >= 11) {
1128 pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
1129 pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
1130 /*
1131 * The equivalent to the PM ISR & IIR cannot be read
1132 * without affecting the current state of the system
1133 */
1134 pm_isr = 0;
1135 pm_iir = 0;
1136 } else if (INTEL_GEN(dev_priv) >= 8) {
Paulo Zanoni9dd3c602014-08-01 18:14:48 -03001137 pm_ier = I915_READ(GEN8_GT_IER(2));
1138 pm_imr = I915_READ(GEN8_GT_IMR(2));
1139 pm_isr = I915_READ(GEN8_GT_ISR(2));
1140 pm_iir = I915_READ(GEN8_GT_IIR(2));
Oscar Mateo6b7a6a72018-05-10 14:59:55 -07001141 } else {
1142 pm_ier = I915_READ(GEN6_PMIER);
1143 pm_imr = I915_READ(GEN6_PMIMR);
1144 pm_isr = I915_READ(GEN6_PMISR);
1145 pm_iir = I915_READ(GEN6_PMIIR);
Paulo Zanoni9dd3c602014-08-01 18:14:48 -03001146 }
Oscar Mateo6b7a6a72018-05-10 14:59:55 -07001147 pm_mask = I915_READ(GEN6_PMINTRMSK);
1148
Sagar Arun Kamble960e5462017-10-10 22:29:59 +01001149 seq_printf(m, "Video Turbo Mode: %s\n",
1150 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1151 seq_printf(m, "HW control enabled: %s\n",
1152 yesno(rpmodectl & GEN6_RP_ENABLE));
1153 seq_printf(m, "SW control enabled: %s\n",
1154 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1155 GEN6_RP_MEDIA_SW_MODE));
Oscar Mateo6b7a6a72018-05-10 14:59:55 -07001156
1157 seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
1158 pm_ier, pm_imr, pm_mask);
1159 if (INTEL_GEN(dev_priv) <= 10)
1160 seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
1161 pm_isr, pm_iir);
Sagar Arun Kamble5dd04552017-03-11 08:07:00 +05301162 seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001163 rps->pm_intrmsk_mbz);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001164 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001165 seq_printf(m, "Render p-state ratio: %d\n",
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001166 (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001167 seq_printf(m, "Render p-state VID: %d\n",
1168 gt_perf_status & 0xff);
1169 seq_printf(m, "Render p-state limit: %d\n",
1170 rp_state_limits & 0xff);
Chris Wilson0d8f9492014-03-27 09:06:14 +00001171 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
1172 seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
1173 seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
1174 seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
Chris Wilson8e8c06c2013-08-26 19:51:01 -03001175 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
Ben Widawskyf82855d2013-01-29 12:00:15 -08001176 seq_printf(m, "CAGF: %dMHz\n", cagf);
Akash Goeld6cda9c2016-04-23 00:05:46 +05301177 seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
1178 rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
1179 seq_printf(m, "RP CUR UP: %d (%dus)\n",
1180 rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
1181 seq_printf(m, "RP PREV UP: %d (%dus)\n",
1182 rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
Chris Wilson60548c52018-07-31 14:26:29 +01001183 seq_printf(m, "Up threshold: %d%%\n",
1184 rps->power.up_threshold);
Chris Wilsond86ed342015-04-27 13:41:19 +01001185
Akash Goeld6cda9c2016-04-23 00:05:46 +05301186 seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
1187 rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
1188 seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
1189 rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
1190 seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
1191 rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
Chris Wilson60548c52018-07-31 14:26:29 +01001192 seq_printf(m, "Down threshold: %d%%\n",
1193 rps->power.down_threshold);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001194
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02001195 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
Bob Paauwe35040562015-06-25 14:54:07 -07001196 rp_state_cap >> 16) & 0xff;
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001197 max_freq *= (IS_GEN9_BC(dev_priv) ||
Oscar Mateo2b2874e2018-04-05 17:00:52 +03001198 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001199 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001200 intel_gpu_freq(dev_priv, max_freq));
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001201
1202 max_freq = (rp_state_cap & 0xff00) >> 8;
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001203 max_freq *= (IS_GEN9_BC(dev_priv) ||
Oscar Mateo2b2874e2018-04-05 17:00:52 +03001204 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001205 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001206 intel_gpu_freq(dev_priv, max_freq));
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001207
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02001208 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
Bob Paauwe35040562015-06-25 14:54:07 -07001209 rp_state_cap >> 0) & 0xff;
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001210 max_freq *= (IS_GEN9_BC(dev_priv) ||
Oscar Mateo2b2874e2018-04-05 17:00:52 +03001211 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001212 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001213 intel_gpu_freq(dev_priv, max_freq));
Ben Widawsky31c77382013-04-05 14:29:22 -07001214 seq_printf(m, "Max overclocked frequency: %dMHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001215 intel_gpu_freq(dev_priv, rps->max_freq));
Chris Wilsonaed242f2015-03-18 09:48:21 +00001216
Chris Wilsond86ed342015-04-27 13:41:19 +01001217 seq_printf(m, "Current freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001218 intel_gpu_freq(dev_priv, rps->cur_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001219 seq_printf(m, "Actual freq: %d MHz\n", cagf);
Chris Wilsonaed242f2015-03-18 09:48:21 +00001220 seq_printf(m, "Idle freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001221 intel_gpu_freq(dev_priv, rps->idle_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001222 seq_printf(m, "Min freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001223 intel_gpu_freq(dev_priv, rps->min_freq));
Chris Wilson29ecd78d2016-07-13 09:10:35 +01001224 seq_printf(m, "Boost freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001225 intel_gpu_freq(dev_priv, rps->boost_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001226 seq_printf(m, "Max freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001227 intel_gpu_freq(dev_priv, rps->max_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001228 seq_printf(m,
1229 "efficient (RPe) frequency: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001230 intel_gpu_freq(dev_priv, rps->efficient_freq));
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001231 } else {
Damien Lespiau267f0c92013-06-24 22:59:48 +01001232 seq_puts(m, "no P-state info available\n");
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001233 }
Jesse Barnesf97108d2010-01-29 11:27:07 -08001234
Ville Syrjälä49cd97a2017-02-07 20:33:45 +02001235 seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
Mika Kahola1170f282015-09-25 14:00:32 +03001236 seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
1237 seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
1238
Chris Wilsona0371212019-01-14 14:21:14 +00001239 intel_runtime_pm_put(dev_priv, wakeref);
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02001240 return ret;
Jesse Barnesf97108d2010-01-29 11:27:07 -08001241}
1242
Ben Widawskyd6369512016-09-20 16:54:32 +03001243static void i915_instdone_info(struct drm_i915_private *dev_priv,
1244 struct seq_file *m,
1245 struct intel_instdone *instdone)
1246{
Ben Widawskyf9e61372016-09-20 16:54:33 +03001247 int slice;
1248 int subslice;
1249
Ben Widawskyd6369512016-09-20 16:54:32 +03001250 seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1251 instdone->instdone);
1252
1253 if (INTEL_GEN(dev_priv) <= 3)
1254 return;
1255
1256 seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1257 instdone->slice_common);
1258
1259 if (INTEL_GEN(dev_priv) <= 6)
1260 return;
1261
Ben Widawskyf9e61372016-09-20 16:54:33 +03001262 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1263 seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1264 slice, subslice, instdone->sampler[slice][subslice]);
1265
1266 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1267 seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1268 slice, subslice, instdone->row[slice][subslice]);
Ben Widawskyd6369512016-09-20 16:54:32 +03001269}
1270
/*
 * debugfs entry: dump the current state of the GPU hangcheck machinery.
 *
 * Samples per-engine ACTHD and hangcheck seqnos under a runtime-pm wakeref
 * (the register reads need the device awake), then prints the hangcheck
 * timer/work state and, per engine, the freshly sampled values next to the
 * ones last recorded by the hangcheck worker.  INSTDONE is captured and
 * printed only for the render engine (RCS0).
 */
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];	/* sampled active head, per engine */
	u32 seqno[I915_NUM_ENGINES];	/* sampled hangcheck seqno, per engine */
	struct intel_instdone instdone;
	intel_wakeref_t wakeref;
	enum intel_engine_id id;

	seq_printf(m, "Reset flags: %lx\n", dev_priv->gpu_error.flags);
	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "\tWedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "\tDevice (global) reset in progress\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	/* Sample all the HW state in one go while holding the wakeref. */
	with_intel_runtime_pm(dev_priv, wakeref) {
		for_each_engine(engine, dev_priv, id) {
			acthd[id] = intel_engine_get_active_head(engine);
			seqno[id] = intel_engine_get_hangcheck_seqno(engine);
		}

		intel_engine_get_instdone(dev_priv->engine[RCS0], &instdone);
	}

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x], %dms ago\n",
			   engine->hangcheck.last_seqno,
			   seqno[id],
			   engine->hangcheck.next_seqno,
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);

		/* INSTDONE is only tracked for the render engine. */
		if (engine->id == RCS0) {
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}
1339
Michel Thierry061d06a2017-06-20 10:57:49 +01001340static int i915_reset_info(struct seq_file *m, void *unused)
1341{
1342 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1343 struct i915_gpu_error *error = &dev_priv->gpu_error;
1344 struct intel_engine_cs *engine;
1345 enum intel_engine_id id;
1346
1347 seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1348
1349 for_each_engine(engine, dev_priv, id) {
1350 seq_printf(m, "%s = %u\n", engine->name,
1351 i915_reset_engine_count(error, engine));
1352 }
1353
1354 return 0;
1355}
1356
Ben Widawsky4d855292011-12-12 19:34:16 -08001357static int ironlake_drpc_info(struct seq_file *m)
Jesse Barnesf97108d2010-01-29 11:27:07 -08001358{
David Weinehall36cdd012016-08-22 13:59:31 +03001359 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Ben Widawsky616fdb52011-10-05 11:44:54 -07001360 u32 rgvmodectl, rstdbyctl;
1361 u16 crstandvid;
Ben Widawsky616fdb52011-10-05 11:44:54 -07001362
Ben Widawsky616fdb52011-10-05 11:44:54 -07001363 rgvmodectl = I915_READ(MEMMODECTL);
1364 rstdbyctl = I915_READ(RSTDBYCTL);
1365 crstandvid = I915_READ16(CRSTANDVID);
1366
Jani Nikula742f4912015-09-03 11:16:09 +03001367 seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001368 seq_printf(m, "Boost freq: %d\n",
1369 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1370 MEMMODE_BOOST_FREQ_SHIFT);
1371 seq_printf(m, "HW control enabled: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001372 yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001373 seq_printf(m, "SW control enabled: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001374 yesno(rgvmodectl & MEMMODE_SWMODE_EN));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001375 seq_printf(m, "Gated voltage change: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001376 yesno(rgvmodectl & MEMMODE_RCLK_GATE));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001377 seq_printf(m, "Starting frequency: P%d\n",
1378 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
Jesse Barnes7648fa92010-05-20 14:28:11 -07001379 seq_printf(m, "Max P-state: P%d\n",
Jesse Barnesf97108d2010-01-29 11:27:07 -08001380 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
Jesse Barnes7648fa92010-05-20 14:28:11 -07001381 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1382 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1383 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1384 seq_printf(m, "Render standby enabled: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001385 yesno(!(rstdbyctl & RCX_SW_EXIT)));
Damien Lespiau267f0c92013-06-24 22:59:48 +01001386 seq_puts(m, "Current RS state: ");
Jesse Barnes88271da2011-01-05 12:01:24 -08001387 switch (rstdbyctl & RSX_STATUS_MASK) {
1388 case RSX_STATUS_ON:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001389 seq_puts(m, "on\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001390 break;
1391 case RSX_STATUS_RC1:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001392 seq_puts(m, "RC1\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001393 break;
1394 case RSX_STATUS_RC1E:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001395 seq_puts(m, "RC1E\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001396 break;
1397 case RSX_STATUS_RS1:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001398 seq_puts(m, "RS1\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001399 break;
1400 case RSX_STATUS_RS2:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001401 seq_puts(m, "RS2 (RC6)\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001402 break;
1403 case RSX_STATUS_RS3:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001404 seq_puts(m, "RC3 (RC6+)\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001405 break;
1406 default:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001407 seq_puts(m, "unknown\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001408 break;
1409 }
Jesse Barnesf97108d2010-01-29 11:27:07 -08001410
1411 return 0;
1412}
1413
Mika Kuoppalaf65367b2015-01-16 11:34:42 +02001414static int i915_forcewake_domains(struct seq_file *m, void *data)
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001415{
Chris Wilson233ebf52017-03-23 10:19:44 +00001416 struct drm_i915_private *i915 = node_to_i915(m->private);
Daniele Ceraolo Spuriof568eee2019-03-19 11:35:35 -07001417 struct intel_uncore *uncore = &i915->uncore;
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001418 struct intel_uncore_forcewake_domain *fw_domain;
Chris Wilsond2dc94b2017-03-23 10:19:41 +00001419 unsigned int tmp;
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001420
Chris Wilsond7a133d2017-09-07 14:44:41 +01001421 seq_printf(m, "user.bypass_count = %u\n",
Daniele Ceraolo Spuriof568eee2019-03-19 11:35:35 -07001422 uncore->user_forcewake.count);
Chris Wilsond7a133d2017-09-07 14:44:41 +01001423
Daniele Ceraolo Spuriof568eee2019-03-19 11:35:35 -07001424 for_each_fw_domain(fw_domain, uncore, tmp)
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001425 seq_printf(m, "%s.wake_count = %u\n",
Tvrtko Ursulin33c582c2016-04-07 17:04:33 +01001426 intel_uncore_forcewake_domain_to_str(fw_domain->id),
Chris Wilson233ebf52017-03-23 10:19:44 +00001427 READ_ONCE(fw_domain->wake_count));
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001428
1429 return 0;
1430}
1431
Mika Kuoppala13628772017-03-15 17:43:02 +02001432static void print_rc6_res(struct seq_file *m,
1433 const char *title,
1434 const i915_reg_t reg)
1435{
1436 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1437
1438 seq_printf(m, "%s %u (%llu us)\n",
1439 title, I915_READ(reg),
1440 intel_rc6_residency_us(dev_priv, reg));
1441}
1442
Deepak S669ab5a2014-01-10 15:18:26 +05301443static int vlv_drpc_info(struct seq_file *m)
1444{
David Weinehall36cdd012016-08-22 13:59:31 +03001445 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Sagar Arun Kamble0d6fc922017-10-10 22:30:02 +01001446 u32 rcctl1, pw_status;
Deepak S669ab5a2014-01-10 15:18:26 +05301447
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001448 pw_status = I915_READ(VLV_GTLC_PW_STATUS);
Deepak S669ab5a2014-01-10 15:18:26 +05301449 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1450
Deepak S669ab5a2014-01-10 15:18:26 +05301451 seq_printf(m, "RC6 Enabled: %s\n",
1452 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1453 GEN6_RC_CTL_EI_MODE(1))));
1454 seq_printf(m, "Render Power Well: %s\n",
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001455 (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
Deepak S669ab5a2014-01-10 15:18:26 +05301456 seq_printf(m, "Media Power Well: %s\n",
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001457 (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
Deepak S669ab5a2014-01-10 15:18:26 +05301458
Mika Kuoppala13628772017-03-15 17:43:02 +02001459 print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1460 print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
Imre Deak9cc19be2014-04-14 20:24:24 +03001461
Mika Kuoppalaf65367b2015-01-16 11:34:42 +02001462 return i915_forcewake_domains(m, NULL);
Deepak S669ab5a2014-01-10 15:18:26 +05301463}
1464
/*
 * Dump RC6 state for gen6+ (non-VLV/CHV) platforms: the enable bits from
 * GEN6_RC_CONTROL, the current RC state decoded from GEN6_GT_CORE_STATUS,
 * gen9+ power-well gating/status, the residency counters, and — on
 * gen6/7 only — the RC6 voltage IDs read via the PCU mailbox.  Ends by
 * chaining into the forcewake-domain dump.
 */
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	/* _FW read bypasses the normal forcewake dance; trace it manually. */
	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	/* RC6 VIDs only exist up to gen7; fetched through the PCU mailbox. */
	if (INTEL_GEN(dev_priv) <= 7) {
		mutex_lock(&dev_priv->pcu_lock);
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids);
		mutex_unlock(&dev_priv->pcu_lock);
	}

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		/* RC0 with core power-down pending reads as "Core Power Down". */
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	/* Decode the three 8-bit VID fields packed into rc6vids. */
	if (INTEL_GEN(dev_priv) <= 7) {
		seq_printf(m, "RC6 voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	return i915_forcewake_domains(m, NULL);
}
1552
/*
 * debugfs entry: dispatch to the platform-appropriate DRPC (render
 * power/C-state) dump, holding a runtime-pm wakeref for the duration so
 * the helpers can read registers.  Returns the helper's status, or
 * -ENODEV if the wakeref body never ran.
 */
static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	int err = -ENODEV;

	with_intel_runtime_pm(dev_priv, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			err = vlv_drpc_info(m);
		else if (INTEL_GEN(dev_priv) >= 6)
			err = gen6_drpc_info(m);
		else
			err = ironlake_drpc_info(m);
	}

	return err;
}
1570
Daniel Vetter9a851782015-06-18 10:30:22 +02001571static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1572{
David Weinehall36cdd012016-08-22 13:59:31 +03001573 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Daniel Vetter9a851782015-06-18 10:30:22 +02001574
1575 seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1576 dev_priv->fb_tracking.busy_bits);
1577
1578 seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1579 dev_priv->fb_tracking.flip_bits);
1580
1581 return 0;
1582}
1583
/*
 * debugfs entry: report whether FBC (framebuffer compression) is active,
 * the recorded reason when it is not, and — when active — whether the
 * hardware is currently compressing, using the gen-specific status
 * register and mask.  Holds a runtime-pm wakeref and fbc->lock for the
 * register reads and no_fbc_reason access.
 */
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_fbc *fbc = &dev_priv->fbc;
	intel_wakeref_t wakeref;

	if (!HAS_FBC(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(dev_priv);
	mutex_lock(&fbc->lock);

	if (intel_fbc_is_active(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);

	if (intel_fbc_is_active(dev_priv)) {
		u32 mask;

		/* Compression-status register and segment mask vary by gen. */
		if (INTEL_GEN(dev_priv) >= 8)
			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 7)
			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 5)
			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
		else if (IS_G4X(dev_priv))
			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
		else
			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
							FBC_STAT_COMPRESSED);

		seq_printf(m, "Compressing: %s\n", yesno(mask));
	}

	mutex_unlock(&fbc->lock);
	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
1624
/*
 * debugfs attribute getter: report the cached FBC false-color setting.
 * Only meaningful on gen7+ hardware with FBC; -ENODEV otherwise.
 */
static int i915_fbc_false_color_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	*val = dev_priv->fbc.false_color;

	return 0;
}
1636
Ville Syrjälä4127dc42017-06-06 15:44:12 +03001637static int i915_fbc_false_color_set(void *data, u64 val)
Rodrigo Vivida46f932014-08-01 02:04:45 -07001638{
David Weinehall36cdd012016-08-22 13:59:31 +03001639 struct drm_i915_private *dev_priv = data;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001640 u32 reg;
1641
David Weinehall36cdd012016-08-22 13:59:31 +03001642 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
Rodrigo Vivida46f932014-08-01 02:04:45 -07001643 return -ENODEV;
1644
Paulo Zanoni25ad93f2015-07-02 19:25:10 -03001645 mutex_lock(&dev_priv->fbc.lock);
Rodrigo Vivida46f932014-08-01 02:04:45 -07001646
1647 reg = I915_READ(ILK_DPFC_CONTROL);
1648 dev_priv->fbc.false_color = val;
1649
1650 I915_WRITE(ILK_DPFC_CONTROL, val ?
1651 (reg | FBC_CTL_FALSE_COLOR) :
1652 (reg & ~FBC_CTL_FALSE_COLOR));
1653
Paulo Zanoni25ad93f2015-07-02 19:25:10 -03001654 mutex_unlock(&dev_priv->fbc.lock);
Rodrigo Vivida46f932014-08-01 02:04:45 -07001655 return 0;
1656}
1657
/*
 * debugfs file operations for the FBC false-color attribute: a single u64
 * value formatted/parsed as "%llu\n", backed by the get/set helpers above.
 */
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");
1661
Paulo Zanoni92d44622013-05-31 16:33:24 -03001662static int i915_ips_status(struct seq_file *m, void *unused)
1663{
David Weinehall36cdd012016-08-22 13:59:31 +03001664 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00001665 intel_wakeref_t wakeref;
Paulo Zanoni92d44622013-05-31 16:33:24 -03001666
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00001667 if (!HAS_IPS(dev_priv))
1668 return -ENODEV;
Paulo Zanoni92d44622013-05-31 16:33:24 -03001669
Chris Wilsona0371212019-01-14 14:21:14 +00001670 wakeref = intel_runtime_pm_get(dev_priv);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001671
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001672 seq_printf(m, "Enabled by kernel parameter: %s\n",
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00001673 yesno(i915_modparams.enable_ips));
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001674
David Weinehall36cdd012016-08-22 13:59:31 +03001675 if (INTEL_GEN(dev_priv) >= 8) {
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001676 seq_puts(m, "Currently: unknown\n");
1677 } else {
1678 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1679 seq_puts(m, "Currently: enabled\n");
1680 else
1681 seq_puts(m, "Currently: disabled\n");
1682 }
Paulo Zanoni92d44622013-05-31 16:33:24 -03001683
Chris Wilsona0371212019-01-14 14:21:14 +00001684 intel_runtime_pm_put(dev_priv, wakeref);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001685
Paulo Zanoni92d44622013-05-31 16:33:24 -03001686 return 0;
1687}
1688
/*
 * debugfs entry: report whether legacy self-refresh (SR) is enabled,
 * probing the gen/platform-specific enable bit.  Gen9+ has no single
 * global SR bit (per-plane watermarks instead), so that branch is a
 * deliberate empty statement and sr_enabled stays false.
 */
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	bool sr_enabled = false;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
1717
Jesse Barnes7648fa92010-05-20 14:28:11 -07001718static int i915_emon_status(struct seq_file *m, void *unused)
1719{
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001720 struct drm_i915_private *i915 = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00001721 intel_wakeref_t wakeref;
Chris Wilsonde227ef2010-07-03 07:58:38 +01001722
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001723 if (!IS_GEN(i915, 5))
Chris Wilson582be6b2012-04-30 19:35:02 +01001724 return -ENODEV;
1725
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001726 with_intel_runtime_pm(i915, wakeref) {
1727 unsigned long temp, chipset, gfx;
Jesse Barnes7648fa92010-05-20 14:28:11 -07001728
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001729 temp = i915_mch_val(i915);
1730 chipset = i915_chipset_val(i915);
1731 gfx = i915_gfx_val(i915);
Chris Wilsona0371212019-01-14 14:21:14 +00001732
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001733 seq_printf(m, "GMCH temp: %ld\n", temp);
1734 seq_printf(m, "Chipset power: %ld\n", chipset);
1735 seq_printf(m, "GFX power: %ld\n", gfx);
1736 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1737 }
Jesse Barnes7648fa92010-05-20 14:28:11 -07001738
1739 return 0;
1740}
1741
/*
 * debugfs entry: print the GPU/ring frequency table on LLC platforms.
 * For each GPU frequency step between the RPS min and max, query the PCU
 * mailbox (GEN6_PCODE_READ_MIN_FREQ_TABLE) for the effective CPU and ring
 * frequencies paired with it.  Requires a runtime-pm wakeref and the
 * (interruptible) pcu_lock.
 */
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	intel_wakeref_t wakeref;
	int gpu_freq, ia_freq;
	int ret;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(dev_priv);

	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		goto out;	/* interrupted; propagate ret */

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		ia_freq = gpu_freq;
		/* Mailbox replies with CPU freq in bits 0-7 and ring freq in
		 * bits 8-15, both in units of 100 MHz (see columns above). */
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ||
						      INTEL_GEN(dev_priv) >= 10 ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	intel_runtime_pm_put(dev_priv, wakeref);
	return ret;
}
1790
Chris Wilson44834a62010-08-19 16:09:23 +01001791static int i915_opregion(struct seq_file *m, void *unused)
1792{
David Weinehall36cdd012016-08-22 13:59:31 +03001793 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1794 struct drm_device *dev = &dev_priv->drm;
Chris Wilson44834a62010-08-19 16:09:23 +01001795 struct intel_opregion *opregion = &dev_priv->opregion;
1796 int ret;
1797
1798 ret = mutex_lock_interruptible(&dev->struct_mutex);
1799 if (ret)
Daniel Vetter0d38f002012-04-21 22:49:10 +02001800 goto out;
Chris Wilson44834a62010-08-19 16:09:23 +01001801
Jani Nikula2455a8e2015-12-14 12:50:53 +02001802 if (opregion->header)
1803 seq_write(m, opregion->header, OPREGION_SIZE);
Chris Wilson44834a62010-08-19 16:09:23 +01001804
1805 mutex_unlock(&dev->struct_mutex);
1806
Daniel Vetter0d38f002012-04-21 22:49:10 +02001807out:
Chris Wilson44834a62010-08-19 16:09:23 +01001808 return 0;
1809}
1810
Jani Nikulaada8f952015-12-15 13:17:12 +02001811static int i915_vbt(struct seq_file *m, void *unused)
1812{
David Weinehall36cdd012016-08-22 13:59:31 +03001813 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
Jani Nikulaada8f952015-12-15 13:17:12 +02001814
1815 if (opregion->vbt)
1816 seq_write(m, opregion->vbt, opregion->vbt_size);
1817
1818 return 0;
1819}
1820
/*
 * debugfs entry: describe every framebuffer on the device — the fbdev
 * console framebuffer first (when CONFIG_DRM_FBDEV_EMULATION is set),
 * then every fb in the mode_config list, skipping the fbdev one so it is
 * not printed twice.  Holds struct_mutex (interruptibly) for describe_obj
 * and mode_config.fb_lock while walking the fb list.
 */
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
		seq_putc(m, '\n');
	}
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		/* Already printed above as the fbcon framebuffer. */
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, intel_fb_obj(&fb->base));
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1870
Chris Wilson7e37f882016-08-02 22:50:21 +01001871static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001872{
Chris Wilsonef5032a2018-03-07 13:42:24 +00001873 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1874 ring->space, ring->head, ring->tail, ring->emit);
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001875}
1876
/*
 * debugfs: dump every GEM context on the device, one stanza per context.
 *
 * For each context we print its HW id (if assigned), the owning task (or
 * "(deleted)"/"(kernel)"), the remap-slice flag, and then one line per
 * engine the context is active on (backing state object + ring summary).
 *
 * Returns 0 on success, or -EINTR/-ERESTARTSYS if the struct_mutex wait
 * was interrupted.
 */
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_gem_context *ctx;
	int ret;

	/* The contexts list and per-context state are guarded by struct_mutex. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		struct intel_context *ce;

		seq_puts(m, "HW context ");
		/* Only contexts on the hw_id list have a (pinned) HW id. */
		if (!list_empty(&ctx->hw_id_link))
			seq_printf(m, "%x [pin %u]", ctx->hw_id,
				   atomic_read(&ctx->hw_id_pin_count));
		if (ctx->pid) {
			struct task_struct *task;

			/* Takes a task reference; dropped right after printing. */
			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			/* Owner closed its fd; context lingers until idle. */
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		/* 'R' if the context still needs an sw remap of L3 slices. */
		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		/* Per-engine state: logical state object and ringbuffer. */
		list_for_each_entry(ce, &ctx->active_engines, active_link) {
			seq_printf(m, "%s: ", ce->engine->name);
			if (ce->state)
				describe_obj(m, ce->state->obj);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1929
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001930static const char *swizzle_string(unsigned swizzle)
1931{
Damien Lespiauaee56cf2013-06-24 22:59:49 +01001932 switch (swizzle) {
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001933 case I915_BIT_6_SWIZZLE_NONE:
1934 return "none";
1935 case I915_BIT_6_SWIZZLE_9:
1936 return "bit9";
1937 case I915_BIT_6_SWIZZLE_9_10:
1938 return "bit9/bit10";
1939 case I915_BIT_6_SWIZZLE_9_11:
1940 return "bit9/bit11";
1941 case I915_BIT_6_SWIZZLE_9_10_11:
1942 return "bit9/bit10/bit11";
1943 case I915_BIT_6_SWIZZLE_9_17:
1944 return "bit9/bit17";
1945 case I915_BIT_6_SWIZZLE_9_10_17:
1946 return "bit9/bit10/bit17";
1947 case I915_BIT_6_SWIZZLE_UNKNOWN:
Masanari Iida8a168ca2012-12-29 02:00:09 +09001948 return "unknown";
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001949 }
1950
1951 return "bug";
1952}
1953
/*
 * debugfs: report the detected bit6 swizzling mode for X/Y tiling and the
 * raw memory-controller registers the detection is based on.
 *
 * Register access requires the device awake, hence the runtime-pm wakeref
 * held across the whole body. Always returns 0.
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	/* Gen3/4: DRAM channel config lives in DCC/C*DRB3. */
	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		/* Gen6+: per-channel DIMM config plus arbiter/tiling control. */
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		/* Arbiter mode moved to GAMTARBMODE on gen8+. */
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
2001
Chris Wilson7466c292016-08-15 09:49:33 +01002002static const char *rps_power_to_str(unsigned int power)
2003{
2004 static const char * const strings[] = {
2005 [LOW_POWER] = "low power",
2006 [BETWEEN] = "mixed",
2007 [HIGH_POWER] = "high power",
2008 };
2009
2010 if (power >= ARRAY_SIZE(strings) || !strings[power])
2011 return "unknown";
2012
2013 return strings[power];
2014}
2015
/*
 * debugfs: dump the RPS (render P-state) frequency/boost bookkeeping plus,
 * when the GPU is busy, the hardware up/down autotuning counters.
 *
 * The actual frequency read needs the device awake, so it is only sampled
 * inside with_intel_runtime_pm_if_in_use(); otherwise the last requested
 * frequency (rps->cur_freq) is reported as "actual". Always returns 0.
 */
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 act_freq = rps->cur_freq;
	intel_wakeref_t wakeref;

	with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			/* VLV/CHV: current freq comes from the punit sideband. */
			mutex_lock(&dev_priv->pcu_lock);
			act_freq = vlv_punit_read(dev_priv,
						  PUNIT_REG_GPU_FREQ_STS);
			act_freq = (act_freq >> 8) & 0xff;
			mutex_unlock(&dev_priv->pcu_lock);
		} else {
			act_freq = intel_get_cagf(dev_priv,
						  I915_READ(GEN6_RPSTAT1));
		}
	}

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s [%d requests]\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	/* All freqs below are converted from internal units to MHz. */
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(dev_priv, rps->cur_freq),
		   intel_gpu_freq(dev_priv, act_freq));
	seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, rps->min_freq),
		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq));
	seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, rps->idle_freq),
		   intel_gpu_freq(dev_priv, rps->efficient_freq),
		   intel_gpu_freq(dev_priv, rps->boost_freq));

	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));

	/* Autotuning counters only make sense while RPS is active and busy. */
	if (INTEL_GEN(dev_priv) >= 6 &&
	    rps->enabled &&
	    dev_priv->gt.active_requests) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		/* _FW reads: forcewake held explicitly around the batch. */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		/* Guard against div-by-zero if an EI counter reads back 0. */
		seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
2084
Ben Widawsky63573eb2013-07-04 11:02:07 -07002085static int i915_llc(struct seq_file *m, void *data)
2086{
David Weinehall36cdd012016-08-22 13:59:31 +03002087 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Mika Kuoppala3accaf72016-04-13 17:26:43 +03002088 const bool edram = INTEL_GEN(dev_priv) > 8;
Ben Widawsky63573eb2013-07-04 11:02:07 -07002089
David Weinehall36cdd012016-08-22 13:59:31 +03002090 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
Mika Kuoppala3accaf72016-04-13 17:26:43 +03002091 seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2092 intel_uncore_edram_size(dev_priv)/1024/1024);
Ben Widawsky63573eb2013-07-04 11:02:07 -07002093
2094 return 0;
2095}
2096
/*
 * debugfs: dump HuC firmware load state and the live HUC_STATUS2 register.
 *
 * Returns -ENODEV on hardware without HuC; 0 otherwise. The register read
 * is wrapped in with_intel_runtime_pm() so the device is awake for it.
 */
static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct drm_printer p;

	if (!HAS_HUC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->huc.fw, &p);

	with_intel_runtime_pm(dev_priv, wakeref)
		seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));

	return 0;
}
2114
/*
 * debugfs: dump GuC firmware load state, a decoded GUC_STATUS register,
 * and the 16 GuC software scratch registers.
 *
 * Returns -ENODEV on hardware without GuC; 0 otherwise. All register
 * reads happen inside the runtime-pm block.
 */
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct drm_printer p;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->guc.fw, &p);

	with_intel_runtime_pm(dev_priv, wakeref) {
		u32 tmp = I915_READ(GUC_STATUS);
		u32 i;

		seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
		/* Break the status dword into its bootrom/uKernel/MIA fields. */
		seq_printf(m, "\tBootrom status = 0x%x\n",
			   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
		seq_printf(m, "\tuKernel status = 0x%x\n",
			   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
		seq_printf(m, "\tMIA Core status = 0x%x\n",
			   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
		seq_puts(m, "\nScratch registers:\n");
		for (i = 0; i < 16; i++) {
			seq_printf(m, "\t%2d: \t0x%x\n",
				   i, I915_READ(SOFT_SCRATCH(i)));
		}
	}

	return 0;
}
2147
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01002148static const char *
2149stringify_guc_log_type(enum guc_log_buffer_type type)
2150{
2151 switch (type) {
2152 case GUC_ISR_LOG_BUFFER:
2153 return "ISR";
2154 case GUC_DPC_LOG_BUFFER:
2155 return "DPC";
2156 case GUC_CRASH_DUMP_LOG_BUFFER:
2157 return "CRASH";
2158 default:
2159 MISSING_CASE(type);
2160 }
2161
2162 return "";
2163}
2164
Akash Goel5aa1ee42016-10-12 21:54:36 +05302165static void i915_guc_log_info(struct seq_file *m,
2166 struct drm_i915_private *dev_priv)
2167{
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01002168 struct intel_guc_log *log = &dev_priv->guc.log;
2169 enum guc_log_buffer_type type;
2170
2171 if (!intel_guc_log_relay_enabled(log)) {
2172 seq_puts(m, "GuC log relay disabled\n");
2173 return;
2174 }
Akash Goel5aa1ee42016-10-12 21:54:36 +05302175
Michał Winiarskidb557992018-03-19 10:53:43 +01002176 seq_puts(m, "GuC logging stats:\n");
Akash Goel5aa1ee42016-10-12 21:54:36 +05302177
Michał Winiarski6a96be22018-03-19 10:53:42 +01002178 seq_printf(m, "\tRelay full count: %u\n",
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01002179 log->relay.full_count);
2180
2181 for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
2182 seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
2183 stringify_guc_log_type(type),
2184 log->stats[type].flush,
2185 log->stats[type].sampled_overflow);
2186 }
Akash Goel5aa1ee42016-10-12 21:54:36 +05302187}
2188
Dave Gordon8b417c22015-08-12 15:43:44 +01002189static void i915_guc_client_info(struct seq_file *m,
2190 struct drm_i915_private *dev_priv,
Sagar Arun Kamble5afc8b42017-11-16 19:02:40 +05302191 struct intel_guc_client *client)
Dave Gordon8b417c22015-08-12 15:43:44 +01002192{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002193 struct intel_engine_cs *engine;
Dave Gordonc18468c2016-08-09 15:19:22 +01002194 enum intel_engine_id id;
Jani Nikulae5315212019-01-16 11:15:23 +02002195 u64 tot = 0;
Dave Gordon8b417c22015-08-12 15:43:44 +01002196
Oscar Mateob09935a2017-03-22 10:39:53 -07002197 seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2198 client->priority, client->stage_id, client->proc_desc_offset);
Michał Winiarski59db36c2017-09-14 12:51:23 +02002199 seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2200 client->doorbell_id, client->doorbell_offset);
Dave Gordon8b417c22015-08-12 15:43:44 +01002201
Akash Goel3b3f1652016-10-13 22:44:48 +05302202 for_each_engine(engine, dev_priv, id) {
Dave Gordonc18468c2016-08-09 15:19:22 +01002203 u64 submissions = client->submissions[id];
2204 tot += submissions;
Dave Gordon8b417c22015-08-12 15:43:44 +01002205 seq_printf(m, "\tSubmissions: %llu %s\n",
Dave Gordonc18468c2016-08-09 15:19:22 +01002206 submissions, engine->name);
Dave Gordon8b417c22015-08-12 15:43:44 +01002207 }
2208 seq_printf(m, "\tTotal: %llu\n", tot);
2209}
2210
/*
 * debugfs: top-level GuC state dump — logging stats always, then doorbell
 * map and per-client info when GuC submission is in use.
 *
 * Returns -ENODEV when GuC is not used at all; 0 otherwise (including the
 * logging-only case without GuC submission).
 */
static int i915_guc_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;

	if (!USES_GUC(dev_priv))
		return -ENODEV;

	i915_guc_log_info(m, dev_priv);

	/* Without submission there are no clients/doorbells to report. */
	if (!USES_GUC_SUBMISSION(dev_priv))
		return 0;

	/* Submission enabled implies the execbuf client exists. */
	GEM_BUG_ON(!guc->execbuf_client);

	seq_printf(m, "\nDoorbell map:\n");
	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
	seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);

	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
	i915_guc_client_info(m, dev_priv, guc->execbuf_client);
	/* The preempt client is optional. */
	if (guc->preempt_client) {
		seq_printf(m, "\nGuC preempt client @ %p:\n",
			   guc->preempt_client);
		i915_guc_client_info(m, dev_priv, guc->preempt_client);
	}

	/* Add more as required ... */

	return 0;
}
2242
/*
 * debugfs: walk the GuC stage-descriptor pool and dump every ACTIVE
 * descriptor, including the per-engine execlist context entries for the
 * engines in the execbuf client's engine mask.
 *
 * Returns -ENODEV unless GuC submission is in use; 0 otherwise.
 */
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;
	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
	struct intel_guc_client *client = guc->execbuf_client;
	unsigned int tmp;
	int index;

	if (!USES_GUC_SUBMISSION(dev_priv))
		return -ENODEV;

	/* desc advances in lock-step with index through the pool. */
	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		/* Skip slots not marked active by the GuC. */
		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		/* One execlist context per engine in the client's mask. */
		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
				&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}
2296
/*
 * debugfs: hex-dump a GuC log buffer, four dwords per line.
 *
 * The same show function serves two debugfs entries: info_ent->data
 * selects the load-error log (non-NULL) or the regular log vma (NULL).
 * The backing object is pinned into a WC mapping for the dump and
 * unpinned before returning.
 *
 * Returns -ENODEV without GuC hardware, 0 when there is nothing to dump
 * or on success, or the PTR_ERR from a failed pin.
 */
static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	bool dump_load_err = !!node->info_ent->data;
	struct drm_i915_gem_object *obj = NULL;
	u32 *log;
	int i = 0;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	if (dump_load_err)
		obj = dev_priv->guc.load_err_log;
	else if (dev_priv->guc.log.vma)
		obj = dev_priv->guc.log.vma->obj;

	/* No log captured/allocated: empty output, not an error. */
	if (!obj)
		return 0;

	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(log)) {
		DRM_DEBUG("Failed to pin object\n");
		seq_puts(m, "(log data unaccessible)\n");
		return PTR_ERR(log);
	}

	/* Dump the whole object, 4 consecutive u32s per output line. */
	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			   *(log + i), *(log + i + 1),
			   *(log + i + 2), *(log + i + 3));

	seq_putc(m, '\n');

	i915_gem_object_unpin_map(obj);

	return 0;
}
2335
Michał Winiarski4977a282018-03-19 10:53:40 +01002336static int i915_guc_log_level_get(void *data, u64 *val)
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302337{
Chris Wilsonbcc36d82017-04-07 20:42:20 +01002338 struct drm_i915_private *dev_priv = data;
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302339
Michał Winiarski86aa8242018-03-08 16:46:53 +01002340 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002341 return -ENODEV;
2342
Piotr Piórkowski50935ac2018-06-04 16:19:41 +02002343 *val = intel_guc_log_get_level(&dev_priv->guc.log);
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302344
2345 return 0;
2346}
2347
Michał Winiarski4977a282018-03-19 10:53:40 +01002348static int i915_guc_log_level_set(void *data, u64 val)
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302349{
Chris Wilsonbcc36d82017-04-07 20:42:20 +01002350 struct drm_i915_private *dev_priv = data;
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302351
Michał Winiarski86aa8242018-03-08 16:46:53 +01002352 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002353 return -ENODEV;
2354
Piotr Piórkowski50935ac2018-06-04 16:19:41 +02002355 return intel_guc_log_set_level(&dev_priv->guc.log, val);
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302356}
2357
/* Simple "%lld" debugfs file backed by the get/set helpers above it. */
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
			i915_guc_log_level_get, i915_guc_log_level_set,
			"%lld\n");
2361
Michał Winiarski4977a282018-03-19 10:53:40 +01002362static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2363{
2364 struct drm_i915_private *dev_priv = inode->i_private;
2365
2366 if (!USES_GUC(dev_priv))
2367 return -ENODEV;
2368
2369 file->private_data = &dev_priv->guc.log;
2370
2371 return intel_guc_log_relay_open(&dev_priv->guc.log);
2372}
2373
/*
 * debugfs write: any write to this file, regardless of content, forces a
 * flush of the GuC log relay. The user buffer and offset are deliberately
 * ignored; we report the full count as consumed so the write "succeeds".
 */
static ssize_t
i915_guc_log_relay_write(struct file *filp,
			 const char __user *ubuf,
			 size_t cnt,
			 loff_t *ppos)
{
	struct intel_guc_log *log = filp->private_data;

	intel_guc_log_relay_flush(log);

	return cnt;
}
2386
2387static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2388{
2389 struct drm_i915_private *dev_priv = inode->i_private;
2390
2391 intel_guc_log_relay_close(&dev_priv->guc.log);
2392
2393 return 0;
2394}
2395
/* File operations for the guc_log_relay debugfs entry (open/flush/close). */
static const struct file_operations i915_guc_log_relay_fops = {
	.owner = THIS_MODULE,
	.open = i915_guc_log_relay_open,
	.write = i915_guc_log_relay_write,
	.release = i915_guc_log_relay_release,
};
2402
/*
 * debugfs (per-connector): read the sink-side PSR state over DPCD and
 * print it as "0x%x [name]".
 *
 * Returns -ENODEV if PSR is unsupported or the connector is disconnected;
 * on DPCD read failure, propagates drm_dp_dpcd_readb()'s return value
 * (that helper returns 1 — the number of bytes — on success).
 */
static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
	u8 val;
	/* Indexed by DP_PSR_SINK_STATE_MASK values from the DPCD. */
	static const char * const sink_status[] = {
		"inactive",
		"transition to active, capture and display",
		"active, display from RFB",
		"active, capture and display on sink device timings",
		"transition to inactive, capture and display, timing re-sync",
		"reserved",
		"reserved",
		"sink internal error",
	};
	struct drm_connector *connector = m->private;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	int ret;

	if (!CAN_PSR(dev_priv)) {
		seq_puts(m, "PSR Unsupported\n");
		return -ENODEV;
	}

	if (connector->status != connector_status_connected)
		return -ENODEV;

	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);

	/* drm_dp_dpcd_readb() returns the byte count (1) on success. */
	if (ret == 1) {
		const char *str = "unknown";

		val &= DP_PSR_SINK_STATE_MASK;
		if (val < ARRAY_SIZE(sink_status))
			str = sink_status[val];
		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
	} else {
		return ret;
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2446
/*
 * Print the source-side PSR hardware state machine position, decoded from
 * EDP_PSR2_STATUS or EDP_PSR_STATUS depending on whether PSR2 is enabled.
 * Out-of-range state values are printed as "unknown" with the raw register.
 */
static void
psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
{
	u32 val, status_val;
	const char *status = "unknown";

	if (dev_priv->psr.psr2_enabled) {
		/* PSR2 state names, indexed by EDP_PSR2_STATUS_STATE field. */
		static const char * const live_status[] = {
			"IDLE",
			"CAPTURE",
			"CAPTURE_FS",
			"SLEEP",
			"BUFON_FW",
			"ML_UP",
			"SU_STANDBY",
			"FAST_SLEEP",
			"DEEP_SLEEP",
			"BUF_ON",
			"TG_ON"
		};
		val = I915_READ(EDP_PSR2_STATUS);
		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
			      EDP_PSR2_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	} else {
		/* PSR1 state names, indexed by EDP_PSR_STATUS_STATE field. */
		static const char * const live_status[] = {
			"IDLE",
			"SRDONACK",
			"SRDENT",
			"BUFOFF",
			"BUFON",
			"AUXACK",
			"SRDOFFACK",
			"SRDENT_ON",
		};
		val = I915_READ(EDP_PSR_STATUS);
		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
			      EDP_PSR_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	}

	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
}
2492
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03002493static int i915_edp_psr_status(struct seq_file *m, void *data)
2494{
David Weinehall36cdd012016-08-22 13:59:31 +03002495 struct drm_i915_private *dev_priv = node_to_i915(m->private);
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002496 struct i915_psr *psr = &dev_priv->psr;
Chris Wilsona0371212019-01-14 14:21:14 +00002497 intel_wakeref_t wakeref;
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002498 const char *status;
2499 bool enabled;
2500 u32 val;
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03002501
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002502 if (!HAS_PSR(dev_priv))
2503 return -ENODEV;
Damien Lespiau3553a8e2015-03-09 14:17:58 +00002504
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002505 seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
2506 if (psr->dp)
2507 seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
2508 seq_puts(m, "\n");
2509
2510 if (!psr->sink_support)
Dhinakaran Pandiyanc9ef2912018-01-03 13:38:24 -08002511 return 0;
2512
Chris Wilsona0371212019-01-14 14:21:14 +00002513 wakeref = intel_runtime_pm_get(dev_priv);
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002514 mutex_lock(&psr->lock);
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02002515
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002516 if (psr->enabled)
2517 status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
Dhinakaran Pandiyance3508f2018-05-11 16:00:59 -07002518 else
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002519 status = "disabled";
2520 seq_printf(m, "PSR mode: %s\n", status);
Rodrigo Vivi60e5ffe2016-02-01 12:02:07 -08002521
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002522 if (!psr->enabled)
2523 goto unlock;
Rodrigo Vivi60e5ffe2016-02-01 12:02:07 -08002524
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002525 if (psr->psr2_enabled) {
2526 val = I915_READ(EDP_PSR2_CTL);
2527 enabled = val & EDP_PSR2_ENABLE;
2528 } else {
2529 val = I915_READ(EDP_PSR_CTL);
2530 enabled = val & EDP_PSR_ENABLE;
2531 }
2532 seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
2533 enableddisabled(enabled), val);
2534 psr_source_status(dev_priv, m);
2535 seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
2536 psr->busy_frontbuffer_bits);
Rodrigo Vivia6cbdb82014-11-14 08:52:40 -08002537
Rodrigo Vivi05eec3c2015-11-23 14:16:40 -08002538 /*
Rodrigo Vivi05eec3c2015-11-23 14:16:40 -08002539 * SKL+ Perf counter is reset to 0 everytime DC state is entered
2540 */
David Weinehall36cdd012016-08-22 13:59:31 +03002541 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002542 val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
2543 seq_printf(m, "Performance counter: %u\n", val);
Rodrigo Vivia6cbdb82014-11-14 08:52:40 -08002544 }
Nagaraju, Vathsala6ba1f9e2017-01-06 22:02:32 +05302545
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002546 if (psr->debug & I915_PSR_DEBUG_IRQ) {
Dhinakaran Pandiyan3f983e542018-04-03 14:24:20 -07002547 seq_printf(m, "Last attempted entry at: %lld\n",
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002548 psr->last_entry_attempt);
2549 seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
Dhinakaran Pandiyan3f983e542018-04-03 14:24:20 -07002550 }
2551
José Roberto de Souzaa81f7812019-01-17 12:55:48 -08002552 if (psr->psr2_enabled) {
2553 u32 su_frames_val[3];
2554 int frame;
2555
2556 /*
2557 * Reading all 3 registers before hand to minimize crossing a
2558 * frame boundary between register reads
2559 */
2560 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
2561 su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));
2562
2563 seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
2564
2565 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
2566 u32 su_blocks;
2567
2568 su_blocks = su_frames_val[frame / 3] &
2569 PSR2_SU_STATUS_MASK(frame);
2570 su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
2571 seq_printf(m, "%d\t%d\n", frame, su_blocks);
2572 }
2573 }
2574
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002575unlock:
2576 mutex_unlock(&psr->lock);
Chris Wilsona0371212019-01-14 14:21:14 +00002577 intel_runtime_pm_put(dev_priv, wakeref);
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002578
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03002579 return 0;
2580}
2581
/*
 * Write handler for the i915_edp_psr_debug debugfs attribute: applies a new
 * PSR debug control value while holding a runtime-PM wakeref so the device
 * stays awake while intel_psr_debug_set() applies it.
 * Returns 0 on success or a negative errno.
 */
static int
i915_edp_psr_debug_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;
	int ret;

	/* PSR debug knobs only make sense on hardware that can do PSR. */
	if (!CAN_PSR(dev_priv))
		return -ENODEV;

	DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);

	/* Keep the device awake while the new debug value is applied. */
	wakeref = intel_runtime_pm_get(dev_priv);

	ret = intel_psr_debug_set(dev_priv, val);

	intel_runtime_pm_put(dev_priv, wakeref);

	return ret;
}
2602
2603static int
2604i915_edp_psr_debug_get(void *data, u64 *val)
2605{
2606 struct drm_i915_private *dev_priv = data;
2607
2608 if (!CAN_PSR(dev_priv))
2609 return -ENODEV;
2610
2611 *val = READ_ONCE(dev_priv->psr.debug);
2612 return 0;
2613}
2614
/* i915_edp_psr_debug: plain u64 debugfs attribute, printed as "%llu\n". */
DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");
2618
/*
 * i915_energy_uJ - report GPU energy consumption in microjoules.
 * Scales the raw SECP energy status counter by the unit exponent read from
 * the RAPL power-unit MSR.
 */
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	unsigned long long power;
	intel_wakeref_t wakeref;
	u32 units;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	/* Bail out cleanly if the MSR read faults. */
	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
		return -ENODEV;

	/* Extract the energy-unit exponent from MSR bits 12:8. */
	units = (power & 0x1f00) >> 8;
	/* Only the register read itself needs the device awake. */
	with_intel_runtime_pm(dev_priv, wakeref)
		power = I915_READ(MCH_SECP_NRG_STTS);

	power = (1000000 * power) >> units; /* convert to uJ */
	seq_printf(m, "%llu", power);

	return 0;
}
2641
/*
 * i915_runtime_pm_status - summarize runtime power management state:
 * driver wakeref status, GT idleness, IRQ state, the PM core usage count
 * (when CONFIG_PM is set) and the PCI power state.
 */
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/* Warn but still dump the remaining state below. */
	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "Runtime power status: %s\n",
		   enableddisabled(!dev_priv->power_domains.wakeref));

	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	/* With wakeref tracking built in, dump the tracked wakerefs too. */
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
		struct drm_printer p = drm_seq_file_printer(m);

		print_intel_runtime_pm_wakeref(dev_priv, &p);
	}

	return 0;
}
2674
Imre Deak1da51582013-11-25 17:15:35 +02002675static int i915_power_domain_info(struct seq_file *m, void *unused)
2676{
David Weinehall36cdd012016-08-22 13:59:31 +03002677 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Imre Deak1da51582013-11-25 17:15:35 +02002678 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2679 int i;
2680
2681 mutex_lock(&power_domains->lock);
2682
2683 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2684 for (i = 0; i < power_domains->power_well_count; i++) {
2685 struct i915_power_well *power_well;
2686 enum intel_display_power_domain power_domain;
2687
2688 power_well = &power_domains->power_wells[i];
Imre Deakf28ec6f2018-08-06 12:58:37 +03002689 seq_printf(m, "%-25s %d\n", power_well->desc->name,
Imre Deak1da51582013-11-25 17:15:35 +02002690 power_well->count);
2691
Imre Deakf28ec6f2018-08-06 12:58:37 +03002692 for_each_power_domain(power_domain, power_well->desc->domains)
Imre Deak1da51582013-11-25 17:15:35 +02002693 seq_printf(m, " %-23s %d\n",
Daniel Stone9895ad02015-11-20 15:55:33 +00002694 intel_display_power_domain_str(power_domain),
Imre Deak1da51582013-11-25 17:15:35 +02002695 power_domains->domain_use_count[power_domain]);
Imre Deak1da51582013-11-25 17:15:35 +02002696 }
2697
2698 mutex_unlock(&power_domains->lock);
2699
2700 return 0;
2701}
2702
/*
 * i915_dmc_info - report DMC/CSR firmware state: load status, firmware
 * path and version, the DC-state transition counters and the CSR
 * program/ssp/htp registers.
 */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv))
		return -ENODEV;

	csr = &dev_priv->csr;

	/* Hold a wakeref for all the register reads below. */
	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	/* Without a loaded payload there is no version or DC-count data. */
	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	/* The DC counter registers below are only valid up to gen11. */
	if (WARN_ON(INTEL_GEN(dev_priv) > 11))
		goto out;

	seq_printf(m, "DC3 -> DC5 count: %d\n",
		   I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
						    SKL_CSR_DC3_DC5_COUNT));
	if (!IS_GEN9_LP(dev_priv))
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
2744
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002745static void intel_seq_print_mode(struct seq_file *m, int tabs,
2746 struct drm_display_mode *mode)
2747{
2748 int i;
2749
2750 for (i = 0; i < tabs; i++)
2751 seq_putc(m, '\t');
2752
Shayenne Moura4fb6bb82018-12-20 10:27:57 -02002753 seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002754}
2755
2756static void intel_encoder_info(struct seq_file *m,
2757 struct intel_crtc *intel_crtc,
2758 struct intel_encoder *intel_encoder)
2759{
David Weinehall36cdd012016-08-22 13:59:31 +03002760 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2761 struct drm_device *dev = &dev_priv->drm;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002762 struct drm_crtc *crtc = &intel_crtc->base;
2763 struct intel_connector *intel_connector;
2764 struct drm_encoder *encoder;
2765
2766 encoder = &intel_encoder->base;
2767 seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
Jani Nikula8e329a032014-06-03 14:56:21 +03002768 encoder->base.id, encoder->name);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002769 for_each_connector_on_encoder(dev, encoder, intel_connector) {
2770 struct drm_connector *connector = &intel_connector->base;
2771 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2772 connector->base.id,
Jani Nikulac23cc412014-06-03 14:56:17 +03002773 connector->name,
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002774 drm_get_connector_status_name(connector->status));
2775 if (connector->status == connector_status_connected) {
2776 struct drm_display_mode *mode = &crtc->mode;
2777 seq_printf(m, ", mode:\n");
2778 intel_seq_print_mode(m, 2, mode);
2779 } else {
2780 seq_putc(m, '\n');
2781 }
2782 }
2783}
2784
/*
 * intel_crtc_info - print the primary plane's framebuffer (id, source
 * position, size) for @intel_crtc, then recurse into every encoder on it.
 */
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_encoder *intel_encoder;
	struct drm_plane_state *plane_state = crtc->primary->state;
	struct drm_framebuffer *fb = plane_state->fb;

	if (fb)
		/* src_x/src_y are 16.16 fixed point; print the integer part. */
		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
			   fb->base.id, plane_state->src_x >> 16,
			   plane_state->src_y >> 16, fb->width, fb->height);
	else
		seq_puts(m, "\tprimary plane disabled\n");
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		intel_encoder_info(m, intel_crtc, intel_encoder);
}
2803
2804static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2805{
2806 struct drm_display_mode *mode = panel->fixed_mode;
2807
2808 seq_printf(m, "\tfixed mode:\n");
2809 intel_seq_print_mode(m, 2, mode);
2810}
2811
/*
 * intel_dp_info - DisplayPort connector details: DPCD revision, audio
 * capability, the fixed panel mode for eDP, plus the downstream-port dump
 * provided by the DP helper library.
 */
static void intel_dp_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
	/* Only eDP connectors carry a fixed panel mode. */
	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_info(m, &intel_connector->panel);

	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
				&intel_dp->aux);
}
2826
Libin Yang9a148a92016-11-28 20:07:05 +08002827static void intel_dp_mst_info(struct seq_file *m,
2828 struct intel_connector *intel_connector)
2829{
2830 struct intel_encoder *intel_encoder = intel_connector->encoder;
2831 struct intel_dp_mst_encoder *intel_mst =
2832 enc_to_mst(&intel_encoder->base);
2833 struct intel_digital_port *intel_dig_port = intel_mst->primary;
2834 struct intel_dp *intel_dp = &intel_dig_port->dp;
2835 bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2836 intel_connector->port);
2837
2838 seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2839}
2840
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002841static void intel_hdmi_info(struct seq_file *m,
2842 struct intel_connector *intel_connector)
2843{
2844 struct intel_encoder *intel_encoder = intel_connector->encoder;
2845 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2846
Jani Nikula742f4912015-09-03 11:16:09 +03002847 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002848}
2849
/* LVDS connectors only report their panel's fixed mode. */
static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
2855
/*
 * intel_connector_info - one-stop dump for a connector: id/type/status,
 * then (for connected connectors) EDID-derived display info, per-protocol
 * details dispatched on the connector type, and finally the mode list.
 */
static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));

	/* A disconnected connector has nothing further worth printing. */
	if (connector->status == connector_status_disconnected)
		return;

	seq_printf(m, "\tname: %s\n", connector->display_info.name);
	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
		   connector->display_info.width_mm,
		   connector->display_info.height_mm);
	seq_printf(m, "\tsubpixel order: %s\n",
		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);

	/* Connectors without an attached encoder get no protocol details. */
	if (!intel_encoder)
		return;

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
			intel_dp_mst_info(m, intel_connector);
		else
			intel_dp_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
		    intel_encoder->type == INTEL_OUTPUT_DDI)
			intel_hdmi_info(m, intel_connector);
		break;
	default:
		break;
	}

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}
2906
/* Map a DRM plane type to the short tag used in the plane dump. */
static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}
2924
Jani Nikula5852a152019-01-07 16:51:49 +02002925static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
Robert Fekete3abc4e02015-10-27 16:58:32 +01002926{
Robert Fekete3abc4e02015-10-27 16:58:32 +01002927 /*
Robert Fossc2c446a2017-05-19 16:50:17 -04002928 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
Robert Fekete3abc4e02015-10-27 16:58:32 +01002929 * will print them all to visualize if the values are misused
2930 */
Jani Nikula5852a152019-01-07 16:51:49 +02002931 snprintf(buf, bufsize,
Robert Fekete3abc4e02015-10-27 16:58:32 +01002932 "%s%s%s%s%s%s(0x%08x)",
Robert Fossc2c446a2017-05-19 16:50:17 -04002933 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
2934 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
2935 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
2936 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
2937 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
2938 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
Robert Fekete3abc4e02015-10-27 16:58:32 +01002939 rotation);
Robert Fekete3abc4e02015-10-27 16:58:32 +01002940}
2941
/*
 * intel_plane_info - dump every plane on @intel_crtc: type, CRTC and source
 * rectangles, pixel format and rotation flags. Source coordinates are in
 * 16.16 fixed point and printed as integer.fraction.
 */
static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_plane *intel_plane;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane_state *state;
		struct drm_plane *plane = &intel_plane->base;
		struct drm_format_name_buf format_name;
		char rot_str[48];

		if (!plane->state) {
			seq_puts(m, "plane->state is NULL!\n");
			continue;
		}

		state = plane->state;

		if (state->fb) {
			drm_get_format_name(state->fb->format->format,
					    &format_name);
		} else {
			sprintf(format_name.str, "N/A");
		}

		plane_rotation(rot_str, sizeof(rot_str), state->rotation);

		/*
		 * ((x & 0xffff) * 15625) >> 10 scales the 16-bit fractional
		 * part to millionths (15625/1024 == 10^6/65536).
		 */
		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
			   plane->base.id,
			   plane_type(intel_plane->base.type),
			   state->crtc_x, state->crtc_y,
			   state->crtc_w, state->crtc_h,
			   (state->src_x >> 16),
			   ((state->src_x & 0xffff) * 15625) >> 10,
			   (state->src_y >> 16),
			   ((state->src_y & 0xffff) * 15625) >> 10,
			   (state->src_w >> 16),
			   ((state->src_w & 0xffff) * 15625) >> 10,
			   (state->src_h >> 16),
			   ((state->src_h & 0xffff) * 15625) >> 10,
			   format_name.str,
			   rot_str);
	}
}
2987
/*
 * intel_scaler_info - report the scaler state of @intel_crtc: scaler count,
 * the claimed-users bitmask and scaler id, and each scaler's in-use flag
 * and mode.
 */
static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct intel_crtc_state *pipe_config;
	int num_scalers = intel_crtc->num_scalers;
	int i;

	pipe_config = to_intel_crtc_state(intel_crtc->base.state);

	/* Not all platforms have a scaler */
	if (num_scalers) {
		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
			   num_scalers,
			   pipe_config->scaler_state.scaler_users,
			   pipe_config->scaler_state.scaler_id);

		for (i = 0; i < num_scalers; i++) {
			struct intel_scaler *sc =
				&pipe_config->scaler_state.scalers[i];

			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
				   i, yesno(sc->in_use), sc->mode);
		}
		seq_puts(m, "\n");
	} else {
		seq_puts(m, "\tNo scalers available on this platform\n");
	}
}
3015
/*
 * i915_display_info - full display state dump: every CRTC (with cursor,
 * scaler and plane details when active, and FIFO-underrun reporting state),
 * followed by every connector.
 *
 * Each CRTC is dumped under its own modeset lock; the connector walk holds
 * mode_config.mutex. A runtime-PM wakeref covers the whole dump.
 */
static int i915_display_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "CRTC info\n");
	seq_printf(m, "---------\n");
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *pipe_config;

		drm_modeset_lock(&crtc->base.mutex, NULL);
		pipe_config = to_intel_crtc_state(crtc->base.state);

		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
			   crtc->base.base.id, pipe_name(crtc->pipe),
			   yesno(pipe_config->base.active),
			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
			   yesno(pipe_config->dither), pipe_config->pipe_bpp);

		/* Plane/cursor/scaler details only exist for active CRTCs. */
		if (pipe_config->base.active) {
			struct intel_plane *cursor =
				to_intel_plane(crtc->base.cursor);

			intel_crtc_info(m, crtc);

			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
				   yesno(cursor->base.state->visible),
				   cursor->base.state->crtc_x,
				   cursor->base.state->crtc_y,
				   cursor->base.state->crtc_w,
				   cursor->base.state->crtc_h,
				   cursor->cursor.base);
			intel_scaler_info(m, crtc);
			intel_plane_info(m, crtc);
		}

		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
			   yesno(!crtc->cpu_fifo_underrun_disabled),
			   yesno(!crtc->pch_fifo_underrun_disabled));
		drm_modeset_unlock(&crtc->base.mutex);
	}

	seq_printf(m, "\n");
	seq_printf(m, "Connector info\n");
	seq_printf(m, "--------------\n");
	mutex_lock(&dev->mode_config.mutex);
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter)
		intel_connector_info(m, connector);
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
3078
/*
 * i915_engine_info - report global GT state (awake flag, active request
 * count, CS timestamp frequency) and then a detailed dump of each engine
 * via intel_engine_dump(). Holds a runtime-PM wakeref throughout.
 */
static int i915_engine_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	enum intel_engine_id id;
	struct drm_printer p;

	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "GT awake? %s\n", yesno(dev_priv->gt.awake));
	seq_printf(m, "Global active requests: %d\n",
		   dev_priv->gt.active_requests);
	seq_printf(m, "CS timestamp frequency: %u kHz\n",
		   RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);

	p = drm_seq_file_printer(m);
	for_each_engine(engine, dev_priv, id)
		intel_engine_dump(engine, &p, "%s\n", engine->name);

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
3103
Lionel Landwerlin79e9cd52018-03-06 12:28:54 +00003104static int i915_rcs_topology(struct seq_file *m, void *unused)
3105{
3106 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3107 struct drm_printer p = drm_seq_file_printer(m);
3108
Jani Nikula02584042018-12-31 16:56:41 +02003109 intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
Lionel Landwerlin79e9cd52018-03-06 12:28:54 +00003110
3111 return 0;
3112}
3113
Chris Wilsonc5418a82017-10-13 21:26:19 +01003114static int i915_shrinker_info(struct seq_file *m, void *unused)
3115{
3116 struct drm_i915_private *i915 = node_to_i915(m->private);
3117
3118 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3119 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3120
3121 return 0;
3122}
3123
/*
 * i915_shared_dplls_info - dump every shared DPLL: name/id, the crtc_mask,
 * active mask and on flag, then the tracked hardware state registers
 * (legacy dpll/fp/wrpll, cfgcr and the ICL MG PLL register set).
 * Holds all modeset locks while printing so the state is consistent.
 */
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i;

	drm_modeset_lock_all(dev);
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
			   pll->info->id);
		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
		seq_printf(m, " tracked hardware state:\n");
		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
		seq_printf(m, " dpll_md: 0x%08x\n",
			   pll->state.hw_state.dpll_md);
		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
			   pll->state.hw_state.mg_refclkin_ctl);
		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
			   pll->state.hw_state.mg_clktop2_coreclkctl1);
		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
			   pll->state.hw_state.mg_clktop2_hsclkctl);
		seq_printf(m, " mg_pll_div0:  0x%08x\n",
			   pll->state.hw_state.mg_pll_div0);
		seq_printf(m, " mg_pll_div1:  0x%08x\n",
			   pll->state.hw_state.mg_pll_div1);
		seq_printf(m, " mg_pll_lf:    0x%08x\n",
			   pll->state.hw_state.mg_pll_lf);
		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
			   pll->state.hw_state.mg_pll_frac_lock);
		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
			   pll->state.hw_state.mg_pll_ssc);
		seq_printf(m, " mg_pll_bias:  0x%08x\n",
			   pll->state.hw_state.mg_pll_bias);
		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
	}
	drm_modeset_unlock_all(dev);

	return 0;
}
3172
Damien Lespiau1ed1ef92014-08-30 16:50:59 +01003173static int i915_wa_registers(struct seq_file *m, void *unused)
Arun Siluvery888b5992014-08-26 14:44:51 +01003174{
Tvrtko Ursulin452420d2018-12-03 13:33:57 +00003175 struct drm_i915_private *i915 = node_to_i915(m->private);
Chris Wilson8a68d462019-03-05 18:03:30 +00003176 const struct i915_wa_list *wal = &i915->engine[RCS0]->ctx_wa_list;
Tvrtko Ursulin452420d2018-12-03 13:33:57 +00003177 struct i915_wa *wa;
3178 unsigned int i;
Arun Siluvery888b5992014-08-26 14:44:51 +01003179
Tvrtko Ursulin452420d2018-12-03 13:33:57 +00003180 seq_printf(m, "Workarounds applied: %u\n", wal->count);
3181 for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
Chris Wilson548764b2018-06-15 13:02:07 +01003182 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
Tvrtko Ursulin452420d2018-12-03 13:33:57 +00003183 i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);
Arun Siluvery888b5992014-08-26 14:44:51 +01003184
3185 return 0;
3186}
3187
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05303188static int i915_ipc_status_show(struct seq_file *m, void *data)
3189{
3190 struct drm_i915_private *dev_priv = m->private;
3191
3192 seq_printf(m, "Isochronous Priority Control: %s\n",
3193 yesno(dev_priv->ipc_enabled));
3194 return 0;
3195}
3196
3197static int i915_ipc_status_open(struct inode *inode, struct file *file)
3198{
3199 struct drm_i915_private *dev_priv = inode->i_private;
3200
3201 if (!HAS_IPC(dev_priv))
3202 return -ENODEV;
3203
3204 return single_open(file, i915_ipc_status_show, dev_priv);
3205}
3206
/*
 * Write handler for i915_ipc_status: parse a boolean from userspace and
 * enable/disable Isochronous Priority Control accordingly.  Returns the
 * number of bytes consumed, or a negative error code on parse failure.
 */
static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	intel_wakeref_t wakeref;
	bool enable;
	int ret;

	ret = kstrtobool_from_user(ubuf, len, &enable);
	if (ret < 0)
		return ret;

	/* Hold a runtime-pm wakeref while touching the hardware state. */
	with_intel_runtime_pm(dev_priv, wakeref) {
		if (!dev_priv->ipc_enabled && enable)
			DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
		/* Watermarks must be recomputed after an IPC state change. */
		dev_priv->wm.distrust_bios_wm = true;
		dev_priv->ipc_enabled = enable;
		intel_enable_ipc(dev_priv);
	}

	return len;
}
3230
/* debugfs file operations for i915_ipc_status (seq_file read + write). */
static const struct file_operations i915_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = i915_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_ipc_status_write
};
3239
/*
 * Dump the display data buffer (DDB) allocation per pipe and plane.
 * Only meaningful on gen9+, which has an explicitly managed DDB.
 */
static int i915_ddb_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct skl_ddb_entry *entry;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 9)
		return -ENODEV;

	/* Lock out modesets so the crtc states stay stable while we read. */
	drm_modeset_lock_all(dev);

	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		enum pipe pipe = crtc->pipe;
		enum plane_id plane_id;

		seq_printf(m, "Pipe %c\n", pipe_name(pipe));

		for_each_plane_id_on_crtc(crtc, plane_id) {
			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
				   entry->start, entry->end,
				   skl_ddb_entry_size(entry));
		}

		/* The cursor plane's allocation is reported separately. */
		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
			   entry->end, skl_ddb_entry_size(entry));
	}

	drm_modeset_unlock_all(dev);

	return 0;
}
3278
/*
 * Print the DRRS (Dynamic Refresh Rate Switching) status for one CRTC:
 * the connectors it drives, the DRRS type reported by the VBT and, when
 * DRRS is active, the current refresh-rate state and vrefresh.
 */
static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Name every connector currently routed to this CRTC. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);
	}
	drm_connector_list_iter_end(&conn_iter);

	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		/* drrs->mutex protects drrs->dp and the refresh-rate state. */
		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled\n");
			if (dev_priv->psr.enabled)
				seq_puts(m,
				"\tAs PSR is enabled, DRRS is not enabled\n");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
					drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			/* Unexpected state: report it and bail out early. */
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
						drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter*/
		seq_puts(m, "\tDRRS Supported : No");
	}
	seq_puts(m, "\n");
}
3353
3354static int i915_drrs_status(struct seq_file *m, void *unused)
3355{
David Weinehall36cdd012016-08-22 13:59:31 +03003356 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3357 struct drm_device *dev = &dev_priv->drm;
Vandana Kannana54746e2015-03-03 20:53:10 +05303358 struct intel_crtc *intel_crtc;
3359 int active_crtc_cnt = 0;
3360
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003361 drm_modeset_lock_all(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303362 for_each_intel_crtc(dev, intel_crtc) {
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003363 if (intel_crtc->base.state->active) {
Vandana Kannana54746e2015-03-03 20:53:10 +05303364 active_crtc_cnt++;
3365 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
3366
3367 drrs_status_per_crtc(m, dev, intel_crtc);
3368 }
Vandana Kannana54746e2015-03-03 20:53:10 +05303369 }
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003370 drm_modeset_unlock_all(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303371
3372 if (!active_crtc_cnt)
3373 seq_puts(m, "No active crtc found\n");
3374
3375 return 0;
3376}
3377
/* Dump the DP MST topology beneath every MST-capable DP source port. */
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *intel_encoder;
	struct intel_digital_port *intel_dig_port;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* Skip MST branch connectors; we want the source port itself. */
		intel_encoder = intel_attached_encoder(connector);
		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		if (!intel_dig_port->dp.can_mst)
			continue;

		seq_printf(m, "MST Source Port %c\n",
			   port_name(intel_dig_port->base.port));
		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
3408
/*
 * Write handler for i915_displayport_test_active: parse a decimal value
 * from userspace and arm (value 1) or disarm DP compliance test handling
 * on every connected DisplayPort connector.
 */
static ssize_t i915_displayport_test_active_write(struct file *file,
						  const char __user *ubuf,
						  size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	if (len == 0)
		return 0;

	/* Copy the user buffer and NUL-terminate it for kstrtoint(). */
	input_buffer = memdup_user_nul(ubuf, len);
	if (IS_ERR(input_buffer))
		return PTR_ERR(input_buffer);

	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* MST connectors are driven by the topology manager; skip. */
		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				break;
			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance.test_active = 1;
			else
				intel_dp->compliance.test_active = 0;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}
3467
3468static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3469{
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003470 struct drm_i915_private *dev_priv = m->private;
3471 struct drm_device *dev = &dev_priv->drm;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003472 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003473 struct drm_connector_list_iter conn_iter;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003474 struct intel_dp *intel_dp;
3475
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003476 drm_connector_list_iter_begin(dev, &conn_iter);
3477 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003478 struct intel_encoder *encoder;
3479
Todd Previteeb3394fa2015-04-18 00:04:19 -07003480 if (connector->connector_type !=
3481 DRM_MODE_CONNECTOR_DisplayPort)
3482 continue;
3483
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003484 encoder = to_intel_encoder(connector->encoder);
3485 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3486 continue;
3487
3488 if (encoder && connector->status == connector_status_connected) {
3489 intel_dp = enc_to_intel_dp(&encoder->base);
Manasi Navarec1617ab2016-12-09 16:22:50 -08003490 if (intel_dp->compliance.test_active)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003491 seq_puts(m, "1");
3492 else
3493 seq_puts(m, "0");
3494 } else
3495 seq_puts(m, "0");
3496 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003497 drm_connector_list_iter_end(&conn_iter);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003498
3499 return 0;
3500}
3501
3502static int i915_displayport_test_active_open(struct inode *inode,
David Weinehall36cdd012016-08-22 13:59:31 +03003503 struct file *file)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003504{
David Weinehall36cdd012016-08-22 13:59:31 +03003505 return single_open(file, i915_displayport_test_active_show,
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003506 inode->i_private);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003507}
3508
/* debugfs file operations for displayport_test_active (seq_file + write). */
static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};
3517
/*
 * Show the data captured for the last requested DP compliance test on
 * each connected DP connector: EDID checksum for EDID-read tests, or the
 * requested timing/bpc for video-pattern tests.
 */
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			if (intel_dp->compliance.test_type ==
			    DP_TEST_LINK_EDID_READ)
				seq_printf(m, "%lx",
					   intel_dp->compliance.test_data.edid);
			else if (intel_dp->compliance.test_type ==
				 DP_TEST_LINK_VIDEO_PATTERN) {
				seq_printf(m, "hdisplay: %d\n",
					   intel_dp->compliance.test_data.hdisplay);
				seq_printf(m, "vdisplay: %d\n",
					   intel_dp->compliance.test_data.vdisplay);
				seq_printf(m, "bpc: %u\n",
					   intel_dp->compliance.test_data.bpc);
			}
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003561
/* Show the pending DP compliance test type for each connected connector. */
static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003593
/*
 * Print one watermark latency table.  The number of levels and the raw
 * value's unit depend on the platform; output is normalized to usec with
 * one decimal place.
 */
static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	int level;
	int num_levels;

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	/* Keep the table stable against concurrent writes via debugfs. */
	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9/vlv/chv
		 */
		if (INTEL_GEN(dev_priv) >= 9 ||
		    IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv) ||
		    IS_G4X(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}
3633
3634static int pri_wm_latency_show(struct seq_file *m, void *data)
3635{
David Weinehall36cdd012016-08-22 13:59:31 +03003636 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003637 const u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003638
David Weinehall36cdd012016-08-22 13:59:31 +03003639 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003640 latencies = dev_priv->wm.skl_latency;
3641 else
David Weinehall36cdd012016-08-22 13:59:31 +03003642 latencies = dev_priv->wm.pri_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003643
3644 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003645
3646 return 0;
3647}
3648
3649static int spr_wm_latency_show(struct seq_file *m, void *data)
3650{
David Weinehall36cdd012016-08-22 13:59:31 +03003651 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003652 const u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003653
David Weinehall36cdd012016-08-22 13:59:31 +03003654 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003655 latencies = dev_priv->wm.skl_latency;
3656 else
David Weinehall36cdd012016-08-22 13:59:31 +03003657 latencies = dev_priv->wm.spr_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003658
3659 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003660
3661 return 0;
3662}
3663
3664static int cur_wm_latency_show(struct seq_file *m, void *data)
3665{
David Weinehall36cdd012016-08-22 13:59:31 +03003666 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003667 const u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003668
David Weinehall36cdd012016-08-22 13:59:31 +03003669 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003670 latencies = dev_priv->wm.skl_latency;
3671 else
David Weinehall36cdd012016-08-22 13:59:31 +03003672 latencies = dev_priv->wm.cur_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003673
3674 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003675
3676 return 0;
3677}
3678
3679static int pri_wm_latency_open(struct inode *inode, struct file *file)
3680{
David Weinehall36cdd012016-08-22 13:59:31 +03003681 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003682
Ville Syrjälä04548cb2017-04-21 21:14:29 +03003683 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003684 return -ENODEV;
3685
David Weinehall36cdd012016-08-22 13:59:31 +03003686 return single_open(file, pri_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003687}
3688
3689static int spr_wm_latency_open(struct inode *inode, struct file *file)
3690{
David Weinehall36cdd012016-08-22 13:59:31 +03003691 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003692
Rodrigo Vivib2ae3182019-02-04 14:25:38 -08003693 if (HAS_GMCH(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003694 return -ENODEV;
3695
David Weinehall36cdd012016-08-22 13:59:31 +03003696 return single_open(file, spr_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003697}
3698
3699static int cur_wm_latency_open(struct inode *inode, struct file *file)
3700{
David Weinehall36cdd012016-08-22 13:59:31 +03003701 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003702
Rodrigo Vivib2ae3182019-02-04 14:25:38 -08003703 if (HAS_GMCH(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003704 return -ENODEV;
3705
David Weinehall36cdd012016-08-22 13:59:31 +03003706 return single_open(file, cur_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003707}
3708
/*
 * Parse up to eight space-separated watermark latency values from
 * userspace and store them in @wm under the modeset locks.  The write
 * must supply exactly one value per watermark level supported by the
 * platform, otherwise -EINVAL is returned.
 */
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, u16 wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	u16 new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	/* Reserve one byte for the NUL terminator. */
	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
3753
3754
3755static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3756 size_t len, loff_t *offp)
3757{
3758 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003759 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003760 u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003761
David Weinehall36cdd012016-08-22 13:59:31 +03003762 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003763 latencies = dev_priv->wm.skl_latency;
3764 else
David Weinehall36cdd012016-08-22 13:59:31 +03003765 latencies = dev_priv->wm.pri_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003766
3767 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003768}
3769
3770static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3771 size_t len, loff_t *offp)
3772{
3773 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003774 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003775 u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003776
David Weinehall36cdd012016-08-22 13:59:31 +03003777 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003778 latencies = dev_priv->wm.skl_latency;
3779 else
David Weinehall36cdd012016-08-22 13:59:31 +03003780 latencies = dev_priv->wm.spr_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003781
3782 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003783}
3784
3785static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3786 size_t len, loff_t *offp)
3787{
3788 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003789 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003790 u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003791
David Weinehall36cdd012016-08-22 13:59:31 +03003792 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003793 latencies = dev_priv->wm.skl_latency;
3794 else
David Weinehall36cdd012016-08-22 13:59:31 +03003795 latencies = dev_priv->wm.cur_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003796
3797 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003798}
3799
/* debugfs file operations for the primary watermark latency table. */
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};
3808
/* debugfs file operations for the sprite watermark latency table. */
static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};
3817
/* debugfs file operations for the cursor watermark latency table. */
static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
3826
Kees Cook647416f2013-03-10 14:10:06 -07003827static int
3828i915_wedged_get(void *data, u64 *val)
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003829{
Chris Wilsonc41166f2019-02-20 14:56:37 +00003830 int ret = i915_terminally_wedged(data);
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003831
Chris Wilsonc41166f2019-02-20 14:56:37 +00003832 switch (ret) {
3833 case -EIO:
3834 *val = 1;
3835 return 0;
3836 case 0:
3837 *val = 0;
3838 return 0;
3839 default:
3840 return ret;
3841 }
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003842}
3843
/*
 * Writing the wedged debugfs file manually injects a GPU error with the
 * given engine mask, triggering error capture and reset handling.
 */
static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/* Flush any previous reset before applying for a new one */
	wait_event(i915->gpu_error.reset_queue,
		   !test_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags));

	i915_handle_error(i915, val, I915_ERROR_CAPTURE,
			  "Manually set wedged engine mask = %llx", val);
	return 0;
}
3857
/* i915_wedged: reading reports wedged state, writing forces an error/reset. */
DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003861
/*
 * Bitmask of actions accepted by the i915_drop_caches debugfs file;
 * DROP_ALL is the union of every supported flag.
 */
#define DROP_UNBOUND BIT(0)
#define DROP_BOUND BIT(1)
#define DROP_RETIRE BIT(2)
#define DROP_ACTIVE BIT(3)
#define DROP_FREED BIT(4)
#define DROP_SHRINK_ALL BIT(5)
#define DROP_IDLE BIT(6)
#define DROP_RESET_ACTIVE BIT(7)
#define DROP_RESET_SEQNO BIT(8)
#define DROP_ALL (DROP_UNBOUND | \
		  DROP_BOUND | \
		  DROP_RETIRE | \
		  DROP_ACTIVE | \
		  DROP_FREED | \
		  DROP_SHRINK_ALL |\
		  DROP_IDLE | \
		  DROP_RESET_ACTIVE | \
		  DROP_RESET_SEQNO)
/*
 * Read side of i915_drop_caches: report the full mask of DROP_* flags
 * that userspace may write to this file.
 */
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}
3887
Kees Cook647416f2013-03-10 14:10:06 -07003888static int
3889i915_drop_caches_set(void *data, u64 val)
Chris Wilsondd624af2013-01-15 12:39:35 +00003890{
Chris Wilson6b048702018-09-03 09:33:37 +01003891 struct drm_i915_private *i915 = data;
Chris Wilsondd624af2013-01-15 12:39:35 +00003892
Chris Wilsonb4a0b322017-10-18 13:16:21 +01003893 DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
3894 val, val & DROP_ALL);
Chris Wilsondd624af2013-01-15 12:39:35 +00003895
Chris Wilsonad4062d2019-01-28 01:02:18 +00003896 if (val & DROP_RESET_ACTIVE &&
3897 wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT))
Chris Wilson6b048702018-09-03 09:33:37 +01003898 i915_gem_set_wedged(i915);
3899
Chris Wilsondd624af2013-01-15 12:39:35 +00003900 /* No need to check and wait for gpu resets, only libdrm auto-restarts
3901 * on ioctls on -EAGAIN. */
Chris Wilson6b048702018-09-03 09:33:37 +01003902 if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
Chris Wilson6cffeb82019-03-18 09:51:49 +00003903 int ret;
3904
Chris Wilson6b048702018-09-03 09:33:37 +01003905 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
Chris Wilsondd624af2013-01-15 12:39:35 +00003906 if (ret)
Chris Wilson6cffeb82019-03-18 09:51:49 +00003907 return ret;
Chris Wilsondd624af2013-01-15 12:39:35 +00003908
Chris Wilson00c26cf2017-05-24 17:26:53 +01003909 if (val & DROP_ACTIVE)
Chris Wilson6b048702018-09-03 09:33:37 +01003910 ret = i915_gem_wait_for_idle(i915,
Chris Wilson00c26cf2017-05-24 17:26:53 +01003911 I915_WAIT_INTERRUPTIBLE |
Chris Wilsonec625fb2018-07-09 13:20:42 +01003912 I915_WAIT_LOCKED,
3913 MAX_SCHEDULE_TIMEOUT);
Chris Wilson00c26cf2017-05-24 17:26:53 +01003914
Chris Wilson6b048702018-09-03 09:33:37 +01003915 if (val & DROP_RETIRE)
3916 i915_retire_requests(i915);
3917
3918 mutex_unlock(&i915->drm.struct_mutex);
3919 }
3920
Chris Wilsonc41166f2019-02-20 14:56:37 +00003921 if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(i915))
Chris Wilson6b048702018-09-03 09:33:37 +01003922 i915_handle_error(i915, ALL_ENGINES, 0, NULL);
Chris Wilsondd624af2013-01-15 12:39:35 +00003923
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01003924 fs_reclaim_acquire(GFP_KERNEL);
Chris Wilson21ab4e72014-09-09 11:16:08 +01003925 if (val & DROP_BOUND)
Chris Wilson6b048702018-09-03 09:33:37 +01003926 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
Chris Wilson4ad72b72014-09-03 19:23:37 +01003927
Chris Wilson21ab4e72014-09-09 11:16:08 +01003928 if (val & DROP_UNBOUND)
Chris Wilson6b048702018-09-03 09:33:37 +01003929 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
Chris Wilsondd624af2013-01-15 12:39:35 +00003930
Chris Wilson8eadc192017-03-08 14:46:22 +00003931 if (val & DROP_SHRINK_ALL)
Chris Wilson6b048702018-09-03 09:33:37 +01003932 i915_gem_shrink_all(i915);
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01003933 fs_reclaim_release(GFP_KERNEL);
Chris Wilson8eadc192017-03-08 14:46:22 +00003934
Chris Wilson4dfacb02018-05-31 09:22:43 +01003935 if (val & DROP_IDLE) {
3936 do {
Chris Wilson6b048702018-09-03 09:33:37 +01003937 if (READ_ONCE(i915->gt.active_requests))
3938 flush_delayed_work(&i915->gt.retire_work);
3939 drain_delayed_work(&i915->gt.idle_work);
3940 } while (READ_ONCE(i915->gt.awake));
Chris Wilson4dfacb02018-05-31 09:22:43 +01003941 }
Chris Wilsonb4a0b322017-10-18 13:16:21 +01003942
Chris Wilsonc9c704712018-02-19 22:06:31 +00003943 if (val & DROP_FREED)
Chris Wilson6b048702018-09-03 09:33:37 +01003944 i915_gem_drain_freed_objects(i915);
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003945
Chris Wilson6cffeb82019-03-18 09:51:49 +00003946 return 0;
Chris Wilsondd624af2013-01-15 12:39:35 +00003947}
3948
/*
 * i915_drop_caches debugfs attribute: read reports DROP_ALL, write
 * applies the requested DROP_* mask via i915_drop_caches_set().
 */
DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
Chris Wilsondd624af2013-01-15 12:39:35 +00003952
/*
 * Read the current cache-sharing policy field from the GEN6 MBCUNIT
 * SNPCR register (gen6/gen7 only). Holds a runtime-pm wakeref for the
 * duration of the register read; snpcr stays 0 if the read is skipped.
 */
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;
	u32 snpcr = 0;

	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
		return -ENODEV;

	with_intel_runtime_pm(dev_priv, wakeref)
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	/* Extract just the snoop-control policy bits. */
	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}
3970
/*
 * Write a new cache-sharing policy (0-3) into the GEN6 MBCUNIT SNPCR
 * register (gen6/gen7 only). Performs a read-modify-write of only the
 * policy field while holding a runtime-pm wakeref.
 */
static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;

	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
		return -ENODEV;

	/* Only 2 bits of policy exist in the register field. */
	if (val > 3)
		return -EINVAL;

	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
	with_intel_runtime_pm(dev_priv, wakeref) {
		u32 snpcr;

		/* Update the cache sharing policy here as well */
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
		snpcr &= ~GEN6_MBC_SNPCR_MASK;
		snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
		I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
	}

	return 0;
}
3996
/*
 * i915_cache_sharing debugfs attribute: get/set the MBC snoop-control
 * policy field (gen6/gen7).
 */
DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004000
/*
 * Fill @sseu with the live slice/subslice/EU power-gating status on
 * Cherryview, derived from the CHV_POWER_SS{0,1}_SIG{1,2} registers.
 * CHV has a single slice with up to two subslices; each signal bit
 * that is NOT power-gated contributes 2 EUs to the totals.
 */
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
#define SS_MAX 2
	const int ss_max = SS_MAX;
	u32 sig1[SS_MAX], sig2[SS_MAX];
	int ss;

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		/* CHV only ever has slice 0. */
		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] |= BIT(ss);
		/* Each un-gated EU pair contributes 2 EUs. */
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
#undef SS_MAX
}
4033
/*
 * Fill @sseu with the live slice/subslice/EU status on gen10+ from the
 * GEN10 slice/EU power-gate ACK registers. Disabled (power-gated)
 * slices and subslices are skipped; EU counts are derived from the ACK
 * bits (2 EUs per acknowledged pair).
 */
static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		/*
		 * FIXME: Valid SS Mask respects the spec and read
		 * only valid bits for those registers, excluding reserved
		 * although this seems wrong because it would leave many
		 * subslices without ACK.
		 */
		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
	}

	/* ACK bit masks for the even (SSA) and odd (SSB) subslices. */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		/* Enabled slices expose the full static subslice mask. */
		sseu->subslice_mask[s] = info->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			/* Two EUs per acknowledged pair. */
			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4089
/*
 * Fill @sseu with the live slice/subslice/EU status on gen9 from the
 * GEN9 power-gate ACK registers. On gen9 big-core (BC) parts the
 * static subslice mask is used for enabled slices; on gen9 LP parts
 * the per-subslice ACK bits are probed individually.
 */
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	/* ACK bit masks for the even (SSA) and odd (SSB) subslices. */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		if (IS_GEN9_BC(dev_priv))
			sseu->subslice_mask[s] =
				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask[s] |= BIT(ss);
			}

			/* Two EUs per acknowledged pair. */
			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4145
/*
 * Fill @sseu with the live slice status on Broadwell. Only the slice
 * mask is read from hardware (GEN8_GT_SLICE_INFO); subslice and EU
 * figures are taken from the static runtime info, then corrected for
 * subslices fused down to 7 EUs.
 */
static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
					 struct sseu_dev_info *sseu)
{
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->eu_per_subslice =
			RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			sseu->subslice_mask[s] =
				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
		}
		sseu->eu_total = sseu->eu_per_subslice *
				 sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu =
				RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}
4173
/*
 * Pretty-print one sseu_dev_info into the seq_file. With
 * @is_available_info true the lines are labelled "Available" and the
 * static capability flags (pooled EU, power gating) are appended;
 * otherwise the lines are labelled "Enabled" and only the counts are
 * printed.
 */
static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
				 const struct sseu_dev_info *sseu)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const char *type = is_available_info ? "Available" : "Enabled";
	int s;

	seq_printf(m, "  %s Slice Mask: %04x\n", type,
		   sseu->slice_mask);
	seq_printf(m, "  %s Slice Total: %u\n", type,
		   hweight8(sseu->slice_mask));
	seq_printf(m, "  %s Subslice Total: %u\n", type,
		   sseu_subslice_total(sseu));
	for (s = 0; s < fls(sseu->slice_mask); s++) {
		seq_printf(m, "  %s Slice%i subslices: %u\n", type,
			   s, hweight8(sseu->subslice_mask[s]));
	}
	seq_printf(m, "  %s EU Total: %u\n", type,
		   sseu->eu_total);
	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
		   sseu->eu_per_subslice);

	/* Capability flags only make sense for the "Available" block. */
	if (!is_available_info)
		return;

	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
	if (HAS_POOLED_EU(dev_priv))
		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);

	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(sseu->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(sseu->has_eu_pg));
}
4210
/*
 * i915_sseu_status debugfs entry (gen8+): print the static ("Device
 * Info") SSEU configuration followed by the live ("Device Status")
 * state probed from hardware while holding a runtime-pm wakeref.
 */
static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct sseu_dev_info sseu;
	intel_wakeref_t wakeref;

	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));
	/* Seed the limits the per-platform probes iterate over. */
	sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
	sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
	sseu.max_eus_per_subslice =
		RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;

	with_intel_runtime_pm(dev_priv, wakeref) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_sseu_device_status(dev_priv, &sseu);
		else if (IS_BROADWELL(dev_priv))
			broadwell_sseu_device_status(dev_priv, &sseu);
		else if (IS_GEN(dev_priv, 9))
			gen9_sseu_device_status(dev_priv, &sseu);
		else if (INTEL_GEN(dev_priv) >= 10)
			gen10_sseu_device_status(dev_priv, &sseu);
	}

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}
4245
Ben Widawsky6d794d42011-04-25 11:25:56 -07004246static int i915_forcewake_open(struct inode *inode, struct file *file)
4247{
Chris Wilsond7a133d2017-09-07 14:44:41 +01004248 struct drm_i915_private *i915 = inode->i_private;
Ben Widawsky6d794d42011-04-25 11:25:56 -07004249
Chris Wilsond7a133d2017-09-07 14:44:41 +01004250 if (INTEL_GEN(i915) < 6)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004251 return 0;
4252
Tvrtko Ursulin6ddbb12e2019-01-17 14:48:31 +00004253 file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915);
Chris Wilsond7a133d2017-09-07 14:44:41 +01004254 intel_uncore_forcewake_user_get(i915);
Ben Widawsky6d794d42011-04-25 11:25:56 -07004255
4256 return 0;
4257}
4258
Ben Widawskyc43b5632012-04-16 14:07:40 -07004259static int i915_forcewake_release(struct inode *inode, struct file *file)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004260{
Chris Wilsond7a133d2017-09-07 14:44:41 +01004261 struct drm_i915_private *i915 = inode->i_private;
Ben Widawsky6d794d42011-04-25 11:25:56 -07004262
Chris Wilsond7a133d2017-09-07 14:44:41 +01004263 if (INTEL_GEN(i915) < 6)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004264 return 0;
4265
Chris Wilsond7a133d2017-09-07 14:44:41 +01004266 intel_uncore_forcewake_user_put(i915);
Tvrtko Ursulin6ddbb12e2019-01-17 14:48:31 +00004267 intel_runtime_pm_put(i915,
4268 (intel_wakeref_t)(uintptr_t)file->private_data);
Ben Widawsky6d794d42011-04-25 11:25:56 -07004269
4270 return 0;
4271}
4272
/*
 * i915_forcewake_user debugfs file: holding it open pins runtime pm
 * and user forcewake (see i915_forcewake_open/release above).
 */
static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
4278
/*
 * Show the HPD storm detection threshold and whether a storm is
 * currently being handled (i.e. the re-enable work is pending).
 */
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;

	/* Synchronize with everything first in case there's been an HPD
	 * storm, but we haven't finished handling it in the kernel yet
	 */
	synchronize_irq(dev_priv->drm.irq);
	flush_work(&dev_priv->hotplug.dig_port_work);
	flush_work(&dev_priv->hotplug.hotplug_work);

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   yesno(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}
4297
4298static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4299 const char __user *ubuf, size_t len,
4300 loff_t *offp)
4301{
4302 struct seq_file *m = file->private_data;
4303 struct drm_i915_private *dev_priv = m->private;
4304 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4305 unsigned int new_threshold;
4306 int i;
4307 char *newline;
4308 char tmp[16];
4309
4310 if (len >= sizeof(tmp))
4311 return -EINVAL;
4312
4313 if (copy_from_user(tmp, ubuf, len))
4314 return -EFAULT;
4315
4316 tmp[len] = '\0';
4317
4318 /* Strip newline, if any */
4319 newline = strchr(tmp, '\n');
4320 if (newline)
4321 *newline = '\0';
4322
4323 if (strcmp(tmp, "reset") == 0)
4324 new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4325 else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4326 return -EINVAL;
4327
4328 if (new_threshold > 0)
4329 DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4330 new_threshold);
4331 else
4332 DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4333
4334 spin_lock_irq(&dev_priv->irq_lock);
4335 hotplug->hpd_storm_threshold = new_threshold;
4336 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4337 for_each_hpd_pin(i)
4338 hotplug->stats[i].count = 0;
4339 spin_unlock_irq(&dev_priv->irq_lock);
4340
4341 /* Re-enable hpd immediately if we were in an irq storm */
4342 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4343
4344 return len;
4345}
4346
/* Open hook: bind the seq_file show routine to this debugfs entry. */
static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}
4351
/* File operations for the i915_hpd_storm_ctl debugfs entry. */
static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
4360
/* Report whether HPD short-pulse storm detection is enabled. */
static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;

	seq_printf(m, "Enabled: %s\n",
		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));

	return 0;
}
4370
/* Open hook: bind the seq_file show routine to this debugfs entry. */
static int
i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_short_storm_ctl_show,
			   inode->i_private);
}
4377
4378static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
4379 const char __user *ubuf,
4380 size_t len, loff_t *offp)
4381{
4382 struct seq_file *m = file->private_data;
4383 struct drm_i915_private *dev_priv = m->private;
4384 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4385 char *newline;
4386 char tmp[16];
4387 int i;
4388 bool new_state;
4389
4390 if (len >= sizeof(tmp))
4391 return -EINVAL;
4392
4393 if (copy_from_user(tmp, ubuf, len))
4394 return -EFAULT;
4395
4396 tmp[len] = '\0';
4397
4398 /* Strip newline, if any */
4399 newline = strchr(tmp, '\n');
4400 if (newline)
4401 *newline = '\0';
4402
4403 /* Reset to the "default" state for this system */
4404 if (strcmp(tmp, "reset") == 0)
4405 new_state = !HAS_DP_MST(dev_priv);
4406 else if (kstrtobool(tmp, &new_state) != 0)
4407 return -EINVAL;
4408
4409 DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
4410 new_state ? "En" : "Dis");
4411
4412 spin_lock_irq(&dev_priv->irq_lock);
4413 hotplug->hpd_short_storm_enabled = new_state;
4414 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4415 for_each_hpd_pin(i)
4416 hotplug->stats[i].count = 0;
4417 spin_unlock_irq(&dev_priv->irq_lock);
4418
4419 /* Re-enable hpd immediately if we were in an irq storm */
4420 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4421
4422 return len;
4423}
4424
/* File operations for the i915_hpd_short_storm_ctl debugfs entry. */
static const struct file_operations i915_hpd_short_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_short_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_short_storm_ctl_write,
};
4433
/*
 * Manually enable (non-zero @val) or disable (zero) DRRS on every
 * active CRTC that supports it (gen7+). For each CRTC: take its
 * modeset lock, wait for any pending commit to finish hardware
 * programming, then walk the connectors bound to the CRTC and toggle
 * DRRS on each attached eDP encoder.
 */
static int i915_drrs_ctl_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 7)
		return -ENODEV;

	for_each_intel_crtc(dev, crtc) {
		struct drm_connector_list_iter conn_iter;
		struct intel_crtc_state *crtc_state;
		struct drm_connector *connector;
		struct drm_crtc_commit *commit;
		int ret;

		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		/* ret is 0 here, so the goto below just unlocks and moves on. */
		if (!crtc_state->base.active ||
		    !crtc_state->has_drrs)
			goto out;

		/* Wait for a pending commit to reach the hardware first. */
		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				goto out;
		}

		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct intel_encoder *encoder;
			struct intel_dp *intel_dp;

			/* Only connectors driven by this CRTC. */
			if (!(crtc_state->base.connector_mask &
			      drm_connector_mask(connector)))
				continue;

			encoder = intel_attached_encoder(connector);
			if (encoder->type != INTEL_OUTPUT_EDP)
				continue;

			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
						val ? "en" : "dis", val);

			intel_dp = enc_to_intel_dp(&encoder->base);
			if (val)
				intel_edp_drrs_enable(intel_dp,
						      crtc_state);
			else
				intel_edp_drrs_disable(intel_dp,
						       crtc_state);
		}
		drm_connector_list_iter_end(&conn_iter);

out:
		drm_modeset_unlock(&crtc->base.mutex);
		if (ret)
			return ret;
	}

	return 0;
}
4501
/* Write-only debugfs attribute toggling DRRS via i915_drrs_ctl_set(). */
DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4503
/*
 * Writing a truthy value re-arms FIFO underrun reporting on every
 * active CRTC (underrun interrupts are disabled after the first hit to
 * avoid an interrupt storm). For each CRTC this takes the modeset
 * lock, waits for any pending commit to fully complete (hw_done and
 * flip_done), then re-arms; finally FBC underrun reporting is reset.
 */
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	/* A falsy write is accepted but does nothing. */
	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		commit = crtc_state->base.commit;
		if (commit) {
			/* Wait for both hardware programming and the flip. */
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->base.active) {
			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
				      pipe_name(intel_crtc->pipe));

			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}

		drm_modeset_unlock(&intel_crtc->base.mutex);

		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}
4557
/* Write-only debugfs file re-arming FIFO underrun reporting. */
static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};
4564
/*
 * Read-only debugfs files, registered in one shot through
 * drm_debugfs_create_files() from i915_debugfs_register(). Each entry is
 * {file name, show callback, driver_features flags[, callback data]}.
 */
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	/* Same show callback as above; (void *)1 selects the load-error log. */
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_reset_info", i915_reset_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
Ben Gamari20172632009-02-17 20:08:50 -05004614
/*
 * Writable (rw) debugfs files with dedicated file_operations, registered
 * one by one in i915_debugfs_register().
 */
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	/* Error capture entries only exist when capture support is built in. */
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};
4642
Chris Wilson1dac8912016-06-24 14:00:17 +01004643int i915_debugfs_register(struct drm_i915_private *dev_priv)
Ben Gamari20172632009-02-17 20:08:50 -05004644{
Chris Wilson91c8a322016-07-05 10:40:23 +01004645 struct drm_minor *minor = dev_priv->drm.primary;
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004646 struct dentry *ent;
Maarten Lankhorst6cc42152018-06-28 09:23:02 +02004647 int i;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004648
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004649 ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4650 minor->debugfs_root, to_i915(minor->dev),
4651 &i915_forcewake_fops);
4652 if (!ent)
4653 return -ENOMEM;
Daniel Vetter6a9c3082011-12-14 13:57:11 +01004654
Daniel Vetter34b96742013-07-04 20:49:44 +02004655 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004656 ent = debugfs_create_file(i915_debugfs_files[i].name,
4657 S_IRUGO | S_IWUSR,
4658 minor->debugfs_root,
4659 to_i915(minor->dev),
Daniel Vetter34b96742013-07-04 20:49:44 +02004660 i915_debugfs_files[i].fops);
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004661 if (!ent)
4662 return -ENOMEM;
Daniel Vetter34b96742013-07-04 20:49:44 +02004663 }
Mika Kuoppala40633212012-12-04 15:12:00 +02004664
Ben Gamari27c202a2009-07-01 22:26:52 -04004665 return drm_debugfs_create_files(i915_debugfs_list,
4666 I915_DEBUGFS_ENTRIES,
Ben Gamari20172632009-02-17 20:08:50 -05004667 minor->debugfs_root, minor);
4668}
4669
/* Describes one contiguous range of DPCD registers to dump. */
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};

/* Ranges dumped by i915_dpcd_show(); each must fit in its 16-byte buffer. */
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
4693
/*
 * Dump the DPCD register ranges described by i915_dpcd_debug[] for the
 * connector stashed in m->private. Per-connector debugfs file, created in
 * i915_debugfs_connector_add(). Returns 0, or -ENODEV when disconnected.
 */
static int i915_dpcd_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	u8 buf[16];
	ssize_t err;
	int i;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
		const struct dpcd_block *b = &i915_dpcd_debug[i];
		/* .end (inclusive) wins over .size; bare offset reads 1 byte */
		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);

		/* eDP-only registers are skipped on ordinary DP connectors */
		if (b->edp &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		/* low tech for now */
		if (WARN_ON(size > sizeof(buf)))
			continue;

		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
		if (err < 0)
			seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
		else
			/* %*ph prints exactly the err bytes actually read */
			seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004728
David Weinehallecbd6782016-08-23 12:23:56 +03004729static int i915_panel_show(struct seq_file *m, void *data)
4730{
4731 struct drm_connector *connector = m->private;
4732 struct intel_dp *intel_dp =
4733 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4734
4735 if (connector->status != connector_status_connected)
4736 return -ENODEV;
4737
4738 seq_printf(m, "Panel power up delay: %d\n",
4739 intel_dp->panel_power_up_delay);
4740 seq_printf(m, "Panel power down delay: %d\n",
4741 intel_dp->panel_power_down_delay);
4742 seq_printf(m, "Backlight on delay: %d\n",
4743 intel_dp->backlight_on_delay);
4744 seq_printf(m, "Backlight off delay: %d\n",
4745 intel_dp->backlight_off_delay);
4746
4747 return 0;
4748}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02004749DEFINE_SHOW_ATTRIBUTE(i915_panel);
David Weinehallecbd6782016-08-23 12:23:56 +03004750
Ramalingam Cbdc93fe2018-10-23 14:52:29 +05304751static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
4752{
4753 struct drm_connector *connector = m->private;
4754 struct intel_connector *intel_connector = to_intel_connector(connector);
4755
4756 if (connector->status != connector_status_connected)
4757 return -ENODEV;
4758
4759 /* HDCP is supported by connector */
Ramalingam Cd3dacc72018-10-29 15:15:46 +05304760 if (!intel_connector->hdcp.shim)
Ramalingam Cbdc93fe2018-10-23 14:52:29 +05304761 return -EINVAL;
4762
4763 seq_printf(m, "%s:%d HDCP version: ", connector->name,
4764 connector->base.id);
4765 seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
4766 "None" : "HDCP1.4");
4767 seq_puts(m, "\n");
4768
4769 return 0;
4770}
4771DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
4772
/*
 * Report DSC enable state and the sink's DSC/FEC capabilities for the
 * DP/eDP connector in m->private. Takes the connection mutex and the
 * CRTC lock under a modeset acquire context, retrying on -EDEADLK via
 * drm_modeset_backoff(). Returns 0 on success, -EINTR if the interruptible
 * lock was interrupted, -ENODEV when disconnected or without an active
 * CRTC, or another locking error.
 */
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret = 0;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		try_again = false;
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			/* interruptible ctx: treat any failure as interrupted */
			ret = -EINTR;
			break;
		}
		crtc = connector->state->crtc;
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			/* drop all locks and restart the acquisition loop */
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   yesno(crtc_state->dsc_params.compression_enable));
		seq_printf(m, "DSC_Sink_Support: %s\n",
			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		/* FEC is only relevant for external DP, not eDP */
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
4826
4827static ssize_t i915_dsc_fec_support_write(struct file *file,
4828 const char __user *ubuf,
4829 size_t len, loff_t *offp)
4830{
4831 bool dsc_enable = false;
4832 int ret;
4833 struct drm_connector *connector =
4834 ((struct seq_file *)file->private_data)->private;
4835 struct intel_encoder *encoder = intel_attached_encoder(connector);
4836 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4837
4838 if (len == 0)
4839 return 0;
4840
4841 DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
4842 len);
4843
4844 ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
4845 if (ret < 0)
4846 return ret;
4847
4848 DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
4849 (dsc_enable) ? "true" : "false");
4850 intel_dp->force_dsc_en = dsc_enable;
4851
4852 *offp += len;
4853 return len;
4854}
4855
/* Open hook: route reads through the seq_file show callback above. */
static int i915_dsc_fec_support_open(struct inode *inode,
				     struct file *file)
{
	return single_open(file, i915_dsc_fec_support_show,
			   inode->i_private);
}
4862
/*
 * Read/write debugfs attribute: reads dump DSC/FEC state via seq_file,
 * writes force-enable/disable DSC (custom .open needed because of .write).
 */
static const struct file_operations i915_dsc_fec_support_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_fec_support_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_fec_support_write
};
4871
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004872/**
4873 * i915_debugfs_connector_add - add i915 specific connector debugfs files
4874 * @connector: pointer to a registered drm_connector
4875 *
4876 * Cleanup will be done by drm_connector_unregister() through a call to
4877 * drm_debugfs_connector_remove().
4878 *
4879 * Returns 0 on success, negative error codes on error.
4880 */
4881int i915_debugfs_connector_add(struct drm_connector *connector)
4882{
4883 struct dentry *root = connector->debugfs_entry;
Manasi Navaree845f092018-12-05 16:54:07 -08004884 struct drm_i915_private *dev_priv = to_i915(connector->dev);
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004885
4886 /* The connector must have been registered beforehands. */
4887 if (!root)
4888 return -ENODEV;
4889
4890 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4891 connector->connector_type == DRM_MODE_CONNECTOR_eDP)
David Weinehallecbd6782016-08-23 12:23:56 +03004892 debugfs_create_file("i915_dpcd", S_IRUGO, root,
4893 connector, &i915_dpcd_fops);
4894
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07004895 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
David Weinehallecbd6782016-08-23 12:23:56 +03004896 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4897 connector, &i915_panel_fops);
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07004898 debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
4899 connector, &i915_psr_sink_status_fops);
4900 }
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004901
Ramalingam Cbdc93fe2018-10-23 14:52:29 +05304902 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4903 connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
4904 connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
4905 debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
4906 connector, &i915_hdcp_sink_capability_fops);
4907 }
4908
Manasi Navaree845f092018-12-05 16:54:07 -08004909 if (INTEL_GEN(dev_priv) >= 10 &&
4910 (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4911 connector->connector_type == DRM_MODE_CONNECTOR_eDP))
4912 debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
4913 connector, &i915_dsc_fec_support_fops);
4914
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004915 return 0;
4916}