/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/debugfs.h>
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include "intel_drv.h"
#include "intel_guc_submission.h"

static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}

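/*
 * Summarise the device: generation, platform and PCH type, followed by the
 * device info flags, runtime info and driver caps, and finally the current
 * module parameters (read under the kernel_param lock).
 */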
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

	intel_device_info_dump_flags(info, &p);
	intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
	intel_driver_caps_print(&dev_priv->caps, &p);

	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915_modparams, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}

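/*
 * Single-character status flags used by describe_obj(): '*' active,
 * 'p' pinned for global (display) use, 'X'/'Y' tiling mode, 'g' live
 * userspace GGTT mmappings (userfault_count), 'M' kernel mapping present.
 */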
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_is_active(obj) ? '*' : ' ';
}

static char get_pin_flag(struct drm_i915_gem_object *obj)
{
	return obj->pin_global ? 'p' : ' ';
}

static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (i915_gem_object_get_tiling(obj)) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
	return obj->userfault_count ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mm.mapping ? 'M' : ' ';
}

static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
{
	u64 size = 0;
	struct i915_vma *vma;

	for_each_ggtt_vma(vma, obj) {
		if (drm_mm_node_allocated(&vma->node))
			size += vma->node.size;
	}

	return size;
}

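/*
 * Render a page-size mask as text: single sizes return a static string,
 * combinations are formatted into the caller-provided buffer.
 */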
static const char *
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
{
	size_t x = 0;

	switch (page_sizes) {
	case 0:
		return "";
	case I915_GTT_PAGE_SIZE_4K:
		return "4K";
	case I915_GTT_PAGE_SIZE_64K:
		return "64K";
	case I915_GTT_PAGE_SIZE_2M:
		return "2M";
	default:
		if (!buf)
			return "M";

		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
			x += snprintf(buf + x, len - x, "2M, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
			x += snprintf(buf + x, len - x, "64K, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
			x += snprintf(buf + x, len - x, "4K, ");
		buf[x-2] = '\0';

		return buf;
	}
}

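/*
 * Print a one-line description of an object: status flags, size, domains,
 * cache level, every allocated VMA (with GGTT view details and fence),
 * stolen offset, last write engine and frontbuffer bits.
 */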
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_global)
		seq_printf(m, " (global)");
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}

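/* sort() comparator: order objects by their start offset in stolen memory. */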
static int obj_rank_by_stolen(const void *A, const void *B)
{
	const struct drm_i915_gem_object *a =
		*(const struct drm_i915_gem_object **)A;
	const struct drm_i915_gem_object *b =
		*(const struct drm_i915_gem_object **)B;

	if (a->stolen->start < b->stolen->start)
		return -1;
	if (a->stolen->start > b->stolen->start)
		return 1;
	return 0;
}

static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, " ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}

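/* Per-client memory accounting accumulated by per_file_stats(). */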
struct file_stats {
	struct i915_address_space *vm;
	unsigned long count;
	u64 total, unbound;
	u64 global, shared;
	u64 active, inactive;
	u64 closed;
};

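/*
 * idr_for_each() callback: accumulate an object's size and the sizes of its
 * VMAs into the file_stats bucket, filtering per-process VMAs against the
 * stats->vm address space.
 */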
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			if (vma->vm != stats->vm)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;

		if (i915_vma_is_closed(vma))
			stats->closed += vma->node.size;
	}

	return 0;
}

#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound, \
			   stats.closed); \
} while (0)

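/* Accumulate stats over every engine's batch-buffer pool cache lists. */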
static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	struct file_stats stats = {};
	enum intel_engine_id id;
	int j;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}

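/*
 * Walk all contexts, charging their ring and context-state objects to the
 * kernel ([k]contexts) and each client's handles to the owning task.
 */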
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *i915)
{
	struct file_stats kstats = {};
	struct i915_gem_context *ctx;

	list_for_each_entry(ctx, &i915->contexts.list, link) {
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		for_each_engine(engine, i915, id) {
			struct intel_context *ce = to_intel_context(ctx, engine);

			if (ce->state)
				per_file_stats(0, ce->state->obj, &kstats);
			if (ce->ring)
				per_file_stats(0, ce->ring->vma->obj, &kstats);
		}

		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
			struct file_stats stats = { .vm = &ctx->ppgtt->vm, };
			struct drm_file *file = ctx->file_priv->file;
			struct task_struct *task;
			char name[80];

			spin_lock(&file->table_lock);
			idr_for_each(&file->object_idr, per_file_stats, &stats);
			spin_unlock(&file->table_lock);

			rcu_read_lock();
			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
			snprintf(name, sizeof(name), "%s/%d",
				 task ? task->comm : "<unknown>",
				 ctx->user_handle);
			rcu_read_unlock();

			print_file_stats(m, name, stats);
		}
	}

	print_file_stats(m, "[k]contexts", kstats);
}

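/*
 * Top-level memory overview: totals for unbound, bound, purgeable, mapped,
 * huge-paged and display-pinned objects, GTT size, plus batch pool and
 * per-context breakdowns.
 */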
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
	struct drm_i915_gem_object *obj;
	unsigned int page_sizes = 0;
	char buf[80];
	int ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	huge_size = huge_count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->pin_global) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
		   huge_count,
		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
		   huge_size);
	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%pa] gtt total\n",
		   ggtt->vm.total, &ggtt->mappable_end);
	seq_printf(m, "Supported page sizes: %s\n",
		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
					buf, sizeof(buf)));

	seq_putc(m, '\n');

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	print_batch_pool_stats(m, dev_priv);
	print_context_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

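/* Describe every object currently bound into the GTT. */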
static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long nobject, n;
	int count, ret;

	nobject = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out; /* don't leak the snapshot array on -EINTR */

	count = 0;
	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		objects[count++] = obj;
		if (count == nobject)
			break;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	total_obj_size = total_gtt_size = 0;
	for (n = 0; n < count; n++) {
		obj = objects[n];

		seq_puts(m, " ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);
out:
	kvfree(objects);

	return ret;
}

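/* Dump the contents of each engine's batch-buffer pool cache lists. */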
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, " ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

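/*
 * Common gen8+ display interrupt registers: per-pipe IMR/IIR/IER (skipping
 * powered-down pipes), plus the port, misc and PCU interrupt registers.
 */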
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		wakeref = intel_display_power_get_if_enabled(dev_priv,
							     power_domain);
		if (!wakeref) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain, wakeref);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}

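/* Dump the interrupt registers appropriate to the platform generation. */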
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int i, pipe;

	wakeref = intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t pref;

		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain, pref);
		}

		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "Master Interrupt Control: %08x\n",
			   I915_READ(GEN11_GFX_MSTR_IRQ));

		seq_printf(m, "Render/Copy Intr Enable: %08x\n",
			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
		seq_printf(m, "VCS/VECS Intr Enable: %08x\n",
			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
		seq_printf(m, "GUC/SG Intr Enable:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
		seq_printf(m, "Crypto Intr Enable:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
		seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));

		seq_printf(m, "Display Interrupt Control:\t%08x\n",
			   I915_READ(GEN11_DISPLAY_INT_CTL));

		gen8_display_interrupt_info(m);
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		gen8_display_interrupt_info(m);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;
			intel_wakeref_t pref;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain, pref);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		seq_printf(m, "Interrupt enable: %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity: %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask: %08x\n",
			   I915_READ(IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat: %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable: %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask: %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable: %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask: %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable: %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity: %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask: %08x\n",
			   I915_READ(GTIMR));
	}

	if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "RCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
		seq_printf(m, "BCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_MASK));
		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));

	} else if (INTEL_GEN(dev_priv) >= 6) {
		for_each_engine(engine, dev_priv, id) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s): %08x\n",
				   engine->name, I915_READ_IMR(engine));
		}
	}

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}

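/* List each fence register and the object (if any) occupying it. */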
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct i915_vma *vma = dev_priv->fence_regs[i].vma;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (!vma)
			seq_puts(m, "unused");
		else
			describe_obj(m, vma->obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
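/* Copy a captured GPU error state out to userspace via a bounce buffer. */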
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_state *error;
	ssize_t ret;
	void *buf;

	error = file->private_data;
	if (!error)
		return 0;

	/* Bounce buffer required because of kernfs __user API convenience. */
	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
	if (ret <= 0)
		goto out;

	if (!copy_to_user(ubuf, buf, ret))
		*pos += ret;
	else
		ret = -EFAULT;

out:
	kfree(buf);
	return ret;
}

static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_state_put(file->private_data);
	return 0;
}

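/* Capture a fresh GPU state snapshot while holding a runtime-pm wakeref. */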
static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_state *gpu;
	intel_wakeref_t wakeref;

	gpu = NULL;
	with_intel_runtime_pm(i915, wakeref)
		gpu = i915_capture_gpu_state(i915);
	if (IS_ERR(gpu))
		return PTR_ERR(gpu);

	file->private_data = gpu;
	return 0;
}

static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};

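/* Any write to i915_error_state clears the recorded error. */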
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_state *error = filp->private_data;

	if (!error)
		return 0;

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_reset_error_state(error->i915);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct i915_gpu_state *error;

	error = i915_first_error_state(inode->i_private);
	if (IS_ERR(error))
		return PTR_ERR(error);

	file->private_data = error;
	return 0;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
#endif

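/*
 * Report GPU frequency/RPS state; the register layout differs between the
 * ILK, VLV/CHV and gen6+ paths.
 */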
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	intel_wakeref_t wakeref;
	int ret = 0;

	wakeref = intel_runtime_pm_get(dev_priv);

	if (IS_GEN(dev_priv, 5)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		mutex_lock(&dev_priv->pcu_lock);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				 GEN6_RP_MEDIA_SW_MODE));

		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
		mutex_unlock(&dev_priv->pcu_lock);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9)
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		cagf = intel_gpu_freq(dev_priv,
				      intel_get_cagf(dev_priv, rpstat));

		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		if (INTEL_GEN(dev_priv) >= 11) {
			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
			/*
			 * The equivalent to the PM ISR & IIR cannot be read
			 * without affecting the current state of the system
			 */
			pm_isr = 0;
			pm_iir = 0;
		} else if (INTEL_GEN(dev_priv) >= 8) {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
		} else {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
		}
		pm_mask = I915_READ(GEN6_PMINTRMSK);

		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				 GEN6_RP_MEDIA_SW_MODE));

		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_mask);
		if (INTEL_GEN(dev_priv) <= 10)
			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
				   pm_isr, pm_iir);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   rps->power.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   rps->power.down_threshold);

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));
		seq_printf(m,
1229 "efficient (RPe) frequency: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001230 intel_gpu_freq(dev_priv, rps->efficient_freq));
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001231 } else {
Damien Lespiau267f0c92013-06-24 22:59:48 +01001232 seq_puts(m, "no P-state info available\n");
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001233 }
Jesse Barnesf97108d2010-01-29 11:27:07 -08001234
Ville Syrjälä49cd97a2017-02-07 20:33:45 +02001235 seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
Mika Kahola1170f282015-09-25 14:00:32 +03001236 seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
1237 seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
1238
Chris Wilsona0371212019-01-14 14:21:14 +00001239 intel_runtime_pm_put(dev_priv, wakeref);
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02001240 return ret;
Jesse Barnesf97108d2010-01-29 11:27:07 -08001241}
1242
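/*
 * Usage sketch for the node above, assuming debugfs is mounted at
 * /sys/kernel/debug and the device is DRM minor 0 (the exact path
 * varies by system):
 *
 *   # cat /sys/kernel/debug/dri/0/i915_frequency_info
 *
 * All frequencies are printed in MHz after intel_gpu_freq() converts
 * the hardware ratio encoding.
 */
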
static void i915_instdone_info(struct drm_i915_private *dev_priv,
			       struct seq_file *m,
			       struct intel_instdone *instdone)
{
	int slice;
	int subslice;

	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
		   instdone->instdone);

	if (INTEL_GEN(dev_priv) <= 3)
		return;

	seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
		   instdone->slice_common);

	if (INTEL_GEN(dev_priv) <= 6)
		return;

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->sampler[slice][subslice]);

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->row[slice][subslice]);
}

static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	intel_wakeref_t wakeref;
	enum intel_engine_id id;

	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "Wedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: struct_mutex backoff\n");
	if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: reset handoff to waiter\n");
	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
		seq_puts(m, "Waiter holding struct mutex\n");
	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
		seq_puts(m, "struct_mutex blocked for reset\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	with_intel_runtime_pm(dev_priv, wakeref) {
		for_each_engine(engine, dev_priv, id) {
			acthd[id] = intel_engine_get_active_head(engine);
			seqno[id] = intel_engine_get_seqno(engine);
		}

		intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
	}

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		struct intel_breadcrumbs *b = &engine->breadcrumbs;
		struct rb_node *rb;

		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
			   engine->hangcheck.seqno, seqno[id],
			   intel_engine_last_submit(engine));
		seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s, wedged? %s\n",
			   yesno(intel_engine_has_waiter(engine)),
			   yesno(test_bit(engine->id,
					  &dev_priv->gpu_error.missed_irq_rings)),
			   yesno(engine->hangcheck.stalled),
			   yesno(engine->hangcheck.wedged));

		spin_lock_irq(&b->rb_lock);
		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
			struct intel_wait *w = rb_entry(rb, typeof(*w), node);

			seq_printf(m, "\t%s [%d] waiting for %x\n",
				   w->tsk->comm, w->tsk->pid, w->seqno);
		}
		spin_unlock_irq(&b->rb_lock);

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);
		seq_printf(m, "\taction = %s(%d) %d ms ago\n",
			   hangcheck_action_to_str(engine->hangcheck.action),
			   engine->hangcheck.action,
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		if (engine->id == RCS) {
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}

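/*
 * Interpreting the per-engine block above: an engine is likely hung
 * when its hangcheck seqno and ACTHD stop advancing between samples
 * while requests remain outstanding ("current" trailing "last"); the
 * "action" line records what the hangcheck worker decided on its most
 * recent pass and how long ago that was.
 */
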
static int i915_reset_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_gpu_error *error = &dev_priv->gpu_error;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s = %u\n", engine->name,
			   i915_reset_engine_count(error, engine));
	}

	return 0;
}

static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}

static int i915_forcewake_domains(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_uncore_forcewake_domain *fw_domain;
	unsigned int tmp;

	seq_printf(m, "user.bypass_count = %u\n",
		   i915->uncore.user_forcewake.count);

	for_each_fw_domain(fw_domain, i915, tmp)
		seq_printf(m, "%s.wake_count = %u\n",
			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
			   READ_ONCE(fw_domain->wake_count));

	return 0;
}

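/*
 * Example output shape for the node above (domain names come from
 * intel_uncore_forcewake_domain_to_str(); the counts here are
 * illustrative only):
 *
 *   user.bypass_count = 0
 *   render.wake_count = 1
 *   blitter.wake_count = 0
 *   media.wake_count = 0
 */
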
static void print_rc6_res(struct seq_file *m,
			  const char *title,
			  const i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "%s %u (%llu us)\n",
		   title, I915_READ(reg),
		   intel_rc6_residency_us(dev_priv, reg));
}

static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rcctl1, pw_status;

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
				   GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

	return i915_forcewake_domains(m, NULL);
}

static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	if (INTEL_GEN(dev_priv) <= 7) {
		mutex_lock(&dev_priv->pcu_lock);
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids);
		mutex_unlock(&dev_priv->pcu_lock);
	}

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	if (INTEL_GEN(dev_priv) <= 7) {
		seq_printf(m, "RC6 voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	return i915_forcewake_domains(m, NULL);
}

static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	int err = -ENODEV;

	with_intel_runtime_pm(dev_priv, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			err = vlv_drpc_info(m);
		else if (INTEL_GEN(dev_priv) >= 6)
			err = gen6_drpc_info(m);
		else
			err = ironlake_drpc_info(m);
	}

	return err;
}

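/*
 * Dispatch summary for i915_drpc_info(): VLV/CHV use the dedicated
 * decoder, all other gen6+ platforms go through gen6_drpc_info(), and
 * older hardware falls back to the Ironlake path. Each helper returns
 * 0 or the i915_forcewake_domains() result, so the -ENODEV
 * initializer is only a safety net.
 */
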
static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
		   dev_priv->fb_tracking.busy_bits);

	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
		   dev_priv->fb_tracking.flip_bits);

	return 0;
}

static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_fbc *fbc = &dev_priv->fbc;
	intel_wakeref_t wakeref;

	if (!HAS_FBC(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(dev_priv);
	mutex_lock(&fbc->lock);

	if (intel_fbc_is_active(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);

	if (intel_fbc_is_active(dev_priv)) {
		u32 mask;

		if (INTEL_GEN(dev_priv) >= 8)
			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 7)
			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 5)
			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
		else if (IS_G4X(dev_priv))
			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
		else
			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
							FBC_STAT_COMPRESSED);

		seq_printf(m, "Compressing: %s\n", yesno(mask));
	}

	mutex_unlock(&fbc->lock);
	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}

static int i915_fbc_false_color_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	*val = dev_priv->fbc.false_color;

	return 0;
}

static int i915_fbc_false_color_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 reg;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	mutex_lock(&dev_priv->fbc.lock);

	reg = I915_READ(ILK_DPFC_CONTROL);
	dev_priv->fbc.false_color = val;

	I915_WRITE(ILK_DPFC_CONTROL, val ?
		   (reg | FBC_CTL_FALSE_COLOR) :
		   (reg & ~FBC_CTL_FALSE_COLOR));

	mutex_unlock(&dev_priv->fbc.lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");

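/*
 * Usage sketch for the attribute above, assuming the standard i915
 * debugfs layout (the path varies by DRM minor):
 *
 *   # echo 1 > /sys/kernel/debug/dri/0/i915_fbc_false_color
 *   # echo 0 > /sys/kernel/debug/dri/0/i915_fbc_false_color
 *
 * A non-zero write sets FBC_CTL_FALSE_COLOR, which tints compressed
 * regions of the frontbuffer so FBC coverage is visible on screen.
 */
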
static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	if (!HAS_IPS(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(i915_modparams.enable_ips));

	if (INTEL_GEN(dev_priv) >= 8) {
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (I915_READ(IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}

static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	bool sr_enabled = false;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}

static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	if (!IS_GEN(i915, 5))
		return -ENODEV;

	with_intel_runtime_pm(i915, wakeref) {
		unsigned long temp, chipset, gfx;

		temp = i915_mch_val(i915);
		chipset = i915_chipset_val(i915);
		gfx = i915_gfx_val(i915);

		seq_printf(m, "GMCH temp: %ld\n", temp);
		seq_printf(m, "Chipset power: %ld\n", chipset);
		seq_printf(m, "GFX power: %ld\n", gfx);
		seq_printf(m, "Total power: %ld\n", chipset + gfx);
	}

	return 0;
}

static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	intel_wakeref_t wakeref;
	int gpu_freq, ia_freq;
	int ret;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(dev_priv);

	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		goto out;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 MHz units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ||
						      INTEL_GEN(dev_priv) >= 10 ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	intel_runtime_pm_put(dev_priv, wakeref);
	return ret;
}

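/*
 * Decoding sketch for the table printed above: pcode hands back the
 * effective CPU and ring frequencies packed into one word, in 100 MHz
 * units. For a hypothetical readback of ia_freq == 0x0812:
 *
 *   effective CPU freq  = ((0x0812 >> 0) & 0xff) * 100 = 1800 MHz
 *   effective ring freq = ((0x0812 >> 8) & 0xff) * 100 =  800 MHz
 *
 * which correspond to the last two columns of each row.
 */
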
static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_opregion *opregion = &dev_priv->opregion;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	if (opregion->header)
		seq_write(m, opregion->header, OPREGION_SIZE);

	mutex_unlock(&dev->struct_mutex);

out:
	return 0;
}

static int i915_vbt(struct seq_file *m, void *unused)
{
	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;

	if (opregion->vbt)
		seq_write(m, opregion->vbt, opregion->vbt_size);

	return 0;
}

static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
		seq_putc(m, '\n');
	}
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, intel_fb_obj(&fb->base));
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
		   ring->space, ring->head, ring->tail, ring->emit);
}

static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		seq_puts(m, "HW context ");
		if (!list_empty(&ctx->hw_id_link))
			seq_printf(m, "%x [pin %u]", ctx->hw_id,
				   atomic_read(&ctx->hw_id_pin_count));
		if (ctx->pid) {
			struct task_struct *task;

			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_engine(engine, dev_priv, id) {
			struct intel_context *ce =
				to_intel_context(ctx, engine);

			seq_printf(m, "%s: ", engine->name);
			if (ce->state)
				describe_obj(m, ce->state->obj);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

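/*
 * Example line shape for the listing above (all values illustrative):
 *
 *   HW context 5 [pin 1] (Xorg [801]) r
 *   rcs0: ... (ringbuffer, space: 3800, head: 120, tail: 296, emit: 296)
 *
 * 'R'/'r' reflects ctx->remap_slice; each engine row then describes
 * the context state object and its ringbuffer, if instantiated.
 */
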
static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}

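/*
 * Background for the strings above: each name lists the physical
 * address bits that the hardware XORs into bit 6 of a tiled mapping,
 * so "bit9/bit10" means bit6 ^= bit9 ^ bit10. Userspace relies on
 * this report to de-swizzle CPU access to tiled buffers.
 */
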
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}

static int count_irq_waiters(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int count = 0;

	for_each_engine(engine, i915, id)
		count += intel_engine_has_waiter(engine);

	return count;
}

static const char *rps_power_to_str(unsigned int power)
{
	static const char * const strings[] = {
		[LOW_POWER] = "low power",
		[BETWEEN] = "mixed",
		[HIGH_POWER] = "high power",
	};

	if (power >= ARRAY_SIZE(strings) || !strings[power])
		return "unknown";

	return strings[power];
}

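/*
 * The strings above mirror the RPS autotuning modes: "low power"
 * biases towards staying at low clocks, "high power" up-clocks
 * aggressively, and "mixed" sits between the two; the active mode is
 * shown in the "RPS Autotuning" block of i915_rps_boost_info() below.
 */
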
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 act_freq = rps->cur_freq;
	intel_wakeref_t wakeref;
	struct drm_file *file;

	with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			mutex_lock(&dev_priv->pcu_lock);
			act_freq = vlv_punit_read(dev_priv,
						  PUNIT_REG_GPU_FREQ_STS);
			act_freq = (act_freq >> 8) & 0xff;
			mutex_unlock(&dev_priv->pcu_lock);
		} else {
			act_freq = intel_get_cagf(dev_priv,
						  I915_READ(GEN6_RPSTAT1));
		}
	}

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s [%d requests]\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(dev_priv, rps->cur_freq),
		   intel_gpu_freq(dev_priv, act_freq));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, rps->min_freq),
		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, rps->idle_freq),
		   intel_gpu_freq(dev_priv, rps->efficient_freq),
		   intel_gpu_freq(dev_priv, rps->boost_freq));

	mutex_lock(&dev->filelist_mutex);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s [%d]: %d boosts\n",
			   task ? task->comm : "<unknown>",
			   task ? task->pid : -1,
			   atomic_read(&file_priv->rps_client.boosts));
		rcu_read_unlock();
	}
	seq_printf(m, "Kernel (anonymous) boosts: %d\n",
		   atomic_read(&rps->boosts));
	mutex_unlock(&dev->filelist_mutex);

	if (INTEL_GEN(dev_priv) >= 6 &&
	    rps->enabled &&
	    dev_priv->gt.active_requests) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}

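/*
 * Worked example for the autotuning figures above: with rpup == 75
 * and rpupei == 100 (both masked by GEN6_RP_EI_MASK), the node
 * reports "Avg. up: 75%", i.e. 100 * rpup / rpupei, compared against
 * rps->power.up_threshold to decide up-clocking.
 */
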
static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const bool edram = INTEL_GEN(dev_priv) > 8;

	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
	seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
		   intel_uncore_edram_size(dev_priv) / 1024 / 1024);

	return 0;
}

static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct drm_printer p;

	if (!HAS_HUC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->huc.fw, &p);

	with_intel_runtime_pm(dev_priv, wakeref)
		seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));

	return 0;
}

static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct drm_printer p;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->guc.fw, &p);

	with_intel_runtime_pm(dev_priv, wakeref) {
		u32 tmp = I915_READ(GUC_STATUS);
		u32 i;

		seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
		seq_printf(m, "\tBootrom status = 0x%x\n",
			   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
		seq_printf(m, "\tuKernel status = 0x%x\n",
			   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
		seq_printf(m, "\tMIA Core status = 0x%x\n",
			   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
		seq_puts(m, "\nScratch registers:\n");
		for (i = 0; i < 16; i++) {
			seq_printf(m, "\t%2d: \t0x%x\n",
				   i, I915_READ(SOFT_SCRATCH(i)));
		}
	}

	return 0;
}

Michał Winiarski5e24e4a2018-03-19 10:53:44 +01002205static const char *
2206stringify_guc_log_type(enum guc_log_buffer_type type)
2207{
2208 switch (type) {
2209 case GUC_ISR_LOG_BUFFER:
2210 return "ISR";
2211 case GUC_DPC_LOG_BUFFER:
2212 return "DPC";
2213 case GUC_CRASH_DUMP_LOG_BUFFER:
2214 return "CRASH";
2215 default:
2216 MISSING_CASE(type);
2217 }
2218
2219 return "";
2220}
2221
Akash Goel5aa1ee42016-10-12 21:54:36 +05302222static void i915_guc_log_info(struct seq_file *m,
2223 struct drm_i915_private *dev_priv)
2224{
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01002225 struct intel_guc_log *log = &dev_priv->guc.log;
2226 enum guc_log_buffer_type type;
2227
2228 if (!intel_guc_log_relay_enabled(log)) {
2229 seq_puts(m, "GuC log relay disabled\n");
2230 return;
2231 }
Akash Goel5aa1ee42016-10-12 21:54:36 +05302232
Michał Winiarskidb557992018-03-19 10:53:43 +01002233 seq_puts(m, "GuC logging stats:\n");
Akash Goel5aa1ee42016-10-12 21:54:36 +05302234
Michał Winiarski6a96be22018-03-19 10:53:42 +01002235 seq_printf(m, "\tRelay full count: %u\n",
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01002236 log->relay.full_count);
2237
2238 for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
2239 seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
2240 stringify_guc_log_type(type),
2241 log->stats[type].flush,
2242 log->stats[type].sampled_overflow);
2243 }
Akash Goel5aa1ee42016-10-12 21:54:36 +05302244}
2245
Dave Gordon8b417c22015-08-12 15:43:44 +01002246static void i915_guc_client_info(struct seq_file *m,
2247 struct drm_i915_private *dev_priv,
Sagar Arun Kamble5afc8b42017-11-16 19:02:40 +05302248 struct intel_guc_client *client)
Dave Gordon8b417c22015-08-12 15:43:44 +01002249{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002250 struct intel_engine_cs *engine;
Dave Gordonc18468c2016-08-09 15:19:22 +01002251 enum intel_engine_id id;
Dave Gordon8b417c22015-08-12 15:43:44 +01002252 u64 tot = 0;
Dave Gordon8b417c22015-08-12 15:43:44 +01002253
Oscar Mateob09935a2017-03-22 10:39:53 -07002254 seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2255 client->priority, client->stage_id, client->proc_desc_offset);
Michał Winiarski59db36c2017-09-14 12:51:23 +02002256 seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2257 client->doorbell_id, client->doorbell_offset);
Dave Gordon8b417c22015-08-12 15:43:44 +01002258
Akash Goel3b3f1652016-10-13 22:44:48 +05302259 for_each_engine(engine, dev_priv, id) {
Dave Gordonc18468c2016-08-09 15:19:22 +01002260 u64 submissions = client->submissions[id];
2261 tot += submissions;
Dave Gordon8b417c22015-08-12 15:43:44 +01002262 seq_printf(m, "\tSubmissions: %llu %s\n",
Dave Gordonc18468c2016-08-09 15:19:22 +01002263 submissions, engine->name);
Dave Gordon8b417c22015-08-12 15:43:44 +01002264 }
2265 seq_printf(m, "\tTotal: %llu\n", tot);
2266}
2267
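/*
 * Top-level GuC overview: log relay statistics, doorbell usage, and the
 * execbuf/preempt client state (the latter only when GuC submission is
 * actually in use).
 */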
2268static int i915_guc_info(struct seq_file *m, void *data)
2269{
David Weinehall36cdd012016-08-22 13:59:31 +03002270 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilson334636c2016-11-29 12:10:20 +00002271 const struct intel_guc *guc = &dev_priv->guc;
Dave Gordon8b417c22015-08-12 15:43:44 +01002272
Michał Winiarskidb557992018-03-19 10:53:43 +01002273 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002274 return -ENODEV;
2275
Michał Winiarskidb557992018-03-19 10:53:43 +01002276 i915_guc_log_info(m, dev_priv);
2277
2278 if (!USES_GUC_SUBMISSION(dev_priv))
2279 return 0;
2280
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002281 GEM_BUG_ON(!guc->execbuf_client);
Dave Gordon8b417c22015-08-12 15:43:44 +01002282
Michał Winiarskidb557992018-03-19 10:53:43 +01002283 seq_printf(m, "\nDoorbell map:\n");
Joonas Lahtinenabddffd2017-03-22 10:39:44 -07002284 seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
Michał Winiarskidb557992018-03-19 10:53:43 +01002285 seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
Dave Gordon9636f6d2016-06-13 17:57:28 +01002286
Chris Wilson334636c2016-11-29 12:10:20 +00002287 seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2288 i915_guc_client_info(m, dev_priv, guc->execbuf_client);
Chris Wilsone78c9172018-02-07 21:05:42 +00002289 if (guc->preempt_client) {
2290 seq_printf(m, "\nGuC preempt client @ %p:\n",
2291 guc->preempt_client);
2292 i915_guc_client_info(m, dev_priv, guc->preempt_client);
2293 }
Dave Gordon8b417c22015-08-12 15:43:44 +01002294
2295 /* Add more as required ... */
2296
2297 return 0;
2298}
2299
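/*
 * Walk the GuC stage descriptor pool and, for each active stage, print
 * its doorbell/workqueue setup and the per-engine execlist contexts.
 */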
Oscar Mateoa8b93702017-05-10 15:04:51 +00002300static int i915_guc_stage_pool(struct seq_file *m, void *data)
Alex Dai4c7e77f2015-08-12 15:43:40 +01002301{
David Weinehall36cdd012016-08-22 13:59:31 +03002302 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Oscar Mateoa8b93702017-05-10 15:04:51 +00002303 const struct intel_guc *guc = &dev_priv->guc;
2304 struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
Sagar Arun Kamble5afc8b42017-11-16 19:02:40 +05302305 struct intel_guc_client *client = guc->execbuf_client;
Oscar Mateoa8b93702017-05-10 15:04:51 +00002306 unsigned int tmp;
2307 int index;
Alex Dai4c7e77f2015-08-12 15:43:40 +01002308
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002309 if (!USES_GUC_SUBMISSION(dev_priv))
2310 return -ENODEV;
Alex Dai4c7e77f2015-08-12 15:43:40 +01002311
Oscar Mateoa8b93702017-05-10 15:04:51 +00002312 for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
2313 struct intel_engine_cs *engine;
Alex Dai4c7e77f2015-08-12 15:43:40 +01002314
Oscar Mateoa8b93702017-05-10 15:04:51 +00002315 if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
2316 continue;
Alex Dai4c7e77f2015-08-12 15:43:40 +01002317
Oscar Mateoa8b93702017-05-10 15:04:51 +00002318 seq_printf(m, "GuC stage descriptor %u:\n", index);
2319 seq_printf(m, "\tIndex: %u\n", desc->stage_id);
2320 seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
2321 seq_printf(m, "\tPriority: %d\n", desc->priority);
2322 seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
2323 seq_printf(m, "\tEngines used: 0x%x\n",
2324 desc->engines_used);
2325 seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
2326 desc->db_trigger_phy,
2327 desc->db_trigger_cpu,
2328 desc->db_trigger_uk);
2329 seq_printf(m, "\tProcess descriptor: 0x%x\n",
2330 desc->process_desc);
Colin Ian King9a094852017-05-16 10:22:35 +01002331 seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
Oscar Mateoa8b93702017-05-10 15:04:51 +00002332 desc->wq_addr, desc->wq_size);
2333 seq_putc(m, '\n');
2334
2335 for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
2336 u32 guc_engine_id = engine->guc_id;
2337 struct guc_execlist_context *lrc =
2338 &desc->lrc[guc_engine_id];
2339
2340 seq_printf(m, "\t%s LRC:\n", engine->name);
2341 seq_printf(m, "\t\tContext desc: 0x%x\n",
2342 lrc->context_desc);
2343 seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
2344 seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
2345 seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
2346 seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
2347 seq_putc(m, '\n');
2348 }
Alex Dai4c7e77f2015-08-12 15:43:40 +01002349 }
2350
Oscar Mateoa8b93702017-05-10 15:04:51 +00002351 return 0;
2352}
2353
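/*
 * Raw hex dump of the GuC log buffer or, for the node variant with
 * info_ent->data set, of the capture from a failed firmware load.
 */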
Alex Dai4c7e77f2015-08-12 15:43:40 +01002354static int i915_guc_log_dump(struct seq_file *m, void *data)
2355{
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002356 struct drm_info_node *node = m->private;
2357 struct drm_i915_private *dev_priv = node_to_i915(node);
2358 bool dump_load_err = !!node->info_ent->data;
2359 struct drm_i915_gem_object *obj = NULL;
2360 u32 *log;
2361 int i = 0;
Alex Dai4c7e77f2015-08-12 15:43:40 +01002362
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002363 if (!HAS_GUC(dev_priv))
2364 return -ENODEV;
2365
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002366 if (dump_load_err)
2367 obj = dev_priv->guc.load_err_log;
2368 else if (dev_priv->guc.log.vma)
2369 obj = dev_priv->guc.log.vma->obj;
2370
2371 if (!obj)
Alex Dai4c7e77f2015-08-12 15:43:40 +01002372 return 0;
2373
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002374 log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2375 if (IS_ERR(log)) {
2376 DRM_DEBUG("Failed to pin object\n");
2377 seq_puts(m, "(log data inaccessible)\n");
2378 return PTR_ERR(log);
Alex Dai4c7e77f2015-08-12 15:43:40 +01002379 }
2380
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002381 for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2382 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2383 *(log + i), *(log + i + 1),
2384 *(log + i + 2), *(log + i + 3));
2385
Alex Dai4c7e77f2015-08-12 15:43:40 +01002386 seq_putc(m, '\n');
2387
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002388 i915_gem_object_unpin_map(obj);
2389
Alex Dai4c7e77f2015-08-12 15:43:40 +01002390 return 0;
2391}
2392
Michał Winiarski4977a282018-03-19 10:53:40 +01002393static int i915_guc_log_level_get(void *data, u64 *val)
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302394{
Chris Wilsonbcc36d82017-04-07 20:42:20 +01002395 struct drm_i915_private *dev_priv = data;
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302396
Michał Winiarski86aa8242018-03-08 16:46:53 +01002397 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002398 return -ENODEV;
2399
Piotr Piórkowski50935ac2018-06-04 16:19:41 +02002400 *val = intel_guc_log_get_level(&dev_priv->guc.log);
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302401
2402 return 0;
2403}
2404
Michał Winiarski4977a282018-03-19 10:53:40 +01002405static int i915_guc_log_level_set(void *data, u64 val)
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302406{
Chris Wilsonbcc36d82017-04-07 20:42:20 +01002407 struct drm_i915_private *dev_priv = data;
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302408
Michał Winiarski86aa8242018-03-08 16:46:53 +01002409 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002410 return -ENODEV;
2411
Piotr Piórkowski50935ac2018-06-04 16:19:41 +02002412 return intel_guc_log_set_level(&dev_priv->guc.log, val);
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302413}
2414
Michał Winiarski4977a282018-03-19 10:53:40 +01002415DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
2416 i915_guc_log_level_get, i915_guc_log_level_set,
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302417 "%lld\n");
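/*
 * Minimal usage sketch (the debugfs path is an assumption, it depends
 * on the card number):
 *	cat /sys/kernel/debug/dri/0/i915_guc_log_level
 *	echo 2 > /sys/kernel/debug/dri/0/i915_guc_log_level
 */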
2418
Michał Winiarski4977a282018-03-19 10:53:40 +01002419static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2420{
2421 struct drm_i915_private *dev_priv = inode->i_private;
2422
2423 if (!USES_GUC(dev_priv))
2424 return -ENODEV;
2425
2426 file->private_data = &dev_priv->guc.log;
2427
2428 return intel_guc_log_relay_open(&dev_priv->guc.log);
2429}
2430
2431static ssize_t
2432i915_guc_log_relay_write(struct file *filp,
2433 const char __user *ubuf,
2434 size_t cnt,
2435 loff_t *ppos)
2436{
2437 struct intel_guc_log *log = filp->private_data;
2438
2439 intel_guc_log_relay_flush(log);
2440
2441 return cnt;
2442}
2443
2444static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2445{
2446 struct drm_i915_private *dev_priv = inode->i_private;
2447
2448 intel_guc_log_relay_close(&dev_priv->guc.log);
2449
2450 return 0;
2451}
2452
2453static const struct file_operations i915_guc_log_relay_fops = {
2454 .owner = THIS_MODULE,
2455 .open = i915_guc_log_relay_open,
2456 .write = i915_guc_log_relay_write,
2457 .release = i915_guc_log_relay_release,
2458};
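/*
 * Opening this node starts the GuC log relay, any write forces a flush
 * of the accumulated log data, and the final close tears the relay down.
 */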
2459
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002460static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2461{
2462 u8 val;
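	/* Names indexed by the DP_PSR_SINK_STATE_MASK bits of DP_PSR_STATUS. */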
2463 static const char * const sink_status[] = {
2464 "inactive",
2465 "transition to active, capture and display",
2466 "active, display from RFB",
2467 "active, capture and display on sink device timings",
2468 "transition to inactive, capture and display, timing re-sync",
2469 "reserved",
2470 "reserved",
2471 "sink internal error",
2472 };
2473 struct drm_connector *connector = m->private;
Rodrigo Vivi7a72c782018-07-19 17:31:55 -07002474 struct drm_i915_private *dev_priv = to_i915(connector->dev);
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002475 struct intel_dp *intel_dp =
2476 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
Rodrigo Vivi7a72c782018-07-19 17:31:55 -07002477 int ret;
2478
2479 if (!CAN_PSR(dev_priv)) {
2480 seq_puts(m, "PSR Unsupported\n");
2481 return -ENODEV;
2482 }
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002483
2484 if (connector->status != connector_status_connected)
2485 return -ENODEV;
2486
Rodrigo Vivi7a72c782018-07-19 17:31:55 -07002487 ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2488
2489 if (ret == 1) {
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002490 const char *str = "unknown";
2491
2492 val &= DP_PSR_SINK_STATE_MASK;
2493 if (val < ARRAY_SIZE(sink_status))
2494 str = sink_status[val];
2495 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2496 } else {
Rodrigo Vivi7a72c782018-07-19 17:31:55 -07002497 return ret;
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002498 }
2499
2500 return 0;
2501}
2502DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2503
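/*
 * Decode the source (hardware) PSR state machine from EDP_PSR2_STATUS
 * or EDP_PSR_STATUS depending on which PSR version is active; encodings
 * that fall outside the known tables are still printed raw.
 */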
Vathsala Nagaraju00b06292018-06-27 13:38:30 +05302504static void
2505psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
Chris Wilsonb86bef202017-01-16 13:06:21 +00002506{
Vathsala Nagaraju00b06292018-06-27 13:38:30 +05302507 u32 val, psr_status;
Chris Wilsonb86bef202017-01-16 13:06:21 +00002508
Vathsala Nagaraju00b06292018-06-27 13:38:30 +05302509 if (dev_priv->psr.psr2_enabled) {
2510 static const char * const live_status[] = {
2511 "IDLE",
2512 "CAPTURE",
2513 "CAPTURE_FS",
2514 "SLEEP",
2515 "BUFON_FW",
2516 "ML_UP",
2517 "SU_STANDBY",
2518 "FAST_SLEEP",
2519 "DEEP_SLEEP",
2520 "BUF_ON",
2521 "TG_ON"
2522 };
2523 psr_status = I915_READ(EDP_PSR2_STATUS);
2524 val = (psr_status & EDP_PSR2_STATUS_STATE_MASK) >>
2525 EDP_PSR2_STATUS_STATE_SHIFT;
2526 if (val < ARRAY_SIZE(live_status)) {
2527 seq_printf(m, "Source PSR status: 0x%x [%s]\n",
2528 psr_status, live_status[val]);
2529 return;
2530 }
2531 } else {
2532 static const char * const live_status[] = {
2533 "IDLE",
2534 "SRDONACK",
2535 "SRDENT",
2536 "BUFOFF",
2537 "BUFON",
2538 "AUXACK",
2539 "SRDOFFACK",
2540 "SRDENT_ON",
2541 };
2542 psr_status = I915_READ(EDP_PSR_STATUS);
2543 val = (psr_status & EDP_PSR_STATUS_STATE_MASK) >>
2544 EDP_PSR_STATUS_STATE_SHIFT;
2545 if (val < ARRAY_SIZE(live_status)) {
2546 seq_printf(m, "Source PSR status: 0x%x [%s]\n",
2547 psr_status, live_status[val]);
2548 return;
2549 }
2550 }
Chris Wilsonb86bef202017-01-16 13:06:21 +00002551
Vathsala Nagaraju00b06292018-06-27 13:38:30 +05302552 seq_printf(m, "Source PSR status: 0x%x [%s]\n", psr_status, "unknown");
Chris Wilsonb86bef202017-01-16 13:06:21 +00002553}
2554
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03002555static int i915_edp_psr_status(struct seq_file *m, void *data)
2556{
David Weinehall36cdd012016-08-22 13:59:31 +03002557 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00002558 intel_wakeref_t wakeref;
Rodrigo Vivia031d702013-10-03 16:15:06 -03002559 u32 psrperf = 0;
2560 bool enabled = false;
Dhinakaran Pandiyanc9ef2912018-01-03 13:38:24 -08002561 bool sink_support;
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03002562
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002563 if (!HAS_PSR(dev_priv))
2564 return -ENODEV;
Damien Lespiau3553a8e2015-03-09 14:17:58 +00002565
Dhinakaran Pandiyanc9ef2912018-01-03 13:38:24 -08002566 sink_support = dev_priv->psr.sink_support;
2567 seq_printf(m, "Sink_Support: %s\n", yesno(sink_support));
2568 if (!sink_support)
2569 return 0;
2570
Chris Wilsona0371212019-01-14 14:21:14 +00002571 wakeref = intel_runtime_pm_get(dev_priv);
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02002572
Daniel Vetterfa128fa2014-07-11 10:30:17 -07002573 mutex_lock(&dev_priv->psr.lock);
Azhar Shaikh0577ab42018-08-22 10:23:48 -07002574 seq_printf(m, "PSR mode: %s\n",
2575 dev_priv->psr.psr2_enabled ? "PSR2" : "PSR1");
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002576 seq_printf(m, "Enabled: %s\n", yesno(dev_priv->psr.enabled));
Daniel Vetterfa128fa2014-07-11 10:30:17 -07002577 seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
2578 dev_priv->psr.busy_frontbuffer_bits);
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03002579
Dhinakaran Pandiyance3508f2018-05-11 16:00:59 -07002580 if (dev_priv->psr.psr2_enabled)
2581 enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
2582 else
2583 enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
Rodrigo Vivi60e5ffe2016-02-01 12:02:07 -08002584
2585 seq_printf(m, "Main link in standby mode: %s\n",
2586 yesno(dev_priv->psr.link_standby));
2587
Dhinakaran Pandiyance3508f2018-05-11 16:00:59 -07002588 seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));
Rodrigo Vivia6cbdb82014-11-14 08:52:40 -08002589
Rodrigo Vivi05eec3c2015-11-23 14:16:40 -08002590 /*
Rodrigo Vivi05eec3c2015-11-23 14:16:40 -08002591 * SKL+ Perf counter is reset to 0 every time DC state is entered
2592 */
David Weinehall36cdd012016-08-22 13:59:31 +03002593 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
Ville Syrjälä443a3892015-11-11 20:34:15 +02002594 psrperf = I915_READ(EDP_PSR_PERF_CNT) &
Rodrigo Vivia031d702013-10-03 16:15:06 -03002595 EDP_PSR_PERF_CNT_MASK;
Rodrigo Vivia6cbdb82014-11-14 08:52:40 -08002596
2597 seq_printf(m, "Performance_Counter: %u\n", psrperf);
2598 }
Nagaraju, Vathsala6ba1f9e2017-01-06 22:02:32 +05302599
Vathsala Nagaraju00b06292018-06-27 13:38:30 +05302600 psr_source_status(dev_priv, m);
Daniel Vetterfa128fa2014-07-11 10:30:17 -07002601 mutex_unlock(&dev_priv->psr.lock);
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03002602
Dhinakaran Pandiyan9844d4b2018-08-21 15:11:55 -07002603 if (READ_ONCE(dev_priv->psr.debug) & I915_PSR_DEBUG_IRQ) {
Dhinakaran Pandiyan3f983e542018-04-03 14:24:20 -07002604 seq_printf(m, "Last attempted entry at: %lld\n",
2605 dev_priv->psr.last_entry_attempt);
2606 seq_printf(m, "Last exit at: %lld\n",
2607 dev_priv->psr.last_exit);
2608 }
2609
Chris Wilsona0371212019-01-14 14:21:14 +00002610 intel_runtime_pm_put(dev_priv, wakeref);
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03002611 return 0;
2612}
2613
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002614static int
2615i915_edp_psr_debug_set(void *data, u64 val)
2616{
2617 struct drm_i915_private *dev_priv = data;
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002618 struct drm_modeset_acquire_ctx ctx;
Chris Wilsona0371212019-01-14 14:21:14 +00002619 intel_wakeref_t wakeref;
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002620 int ret;
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002621
2622 if (!CAN_PSR(dev_priv))
2623 return -ENODEV;
2624
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002625 DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002626
Chris Wilsona0371212019-01-14 14:21:14 +00002627 wakeref = intel_runtime_pm_get(dev_priv);
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002628
2629 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2630
2631retry:
2632 ret = intel_psr_set_debugfs_mode(dev_priv, &ctx, val);
2633 if (ret == -EDEADLK) {
2634 ret = drm_modeset_backoff(&ctx);
2635 if (!ret)
2636 goto retry;
2637 }
2638
2639 drm_modeset_drop_locks(&ctx);
2640 drm_modeset_acquire_fini(&ctx);
2641
Chris Wilsona0371212019-01-14 14:21:14 +00002642 intel_runtime_pm_put(dev_priv, wakeref);
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002643
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002644 return ret;
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002645}
2646
2647static int
2648i915_edp_psr_debug_get(void *data, u64 *val)
2649{
2650 struct drm_i915_private *dev_priv = data;
2651
2652 if (!CAN_PSR(dev_priv))
2653 return -ENODEV;
2654
2655 *val = READ_ONCE(dev_priv->psr.debug);
2656 return 0;
2657}
2658
2659DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
2660 i915_edp_psr_debug_get, i915_edp_psr_debug_set,
2661 "%llu\n");
2662
Jesse Barnesec013e72013-08-20 10:29:23 +01002663static int i915_energy_uJ(struct seq_file *m, void *data)
2664{
David Weinehall36cdd012016-08-22 13:59:31 +03002665 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Gabriel Krisman Bertazid38014e2017-07-26 02:30:16 -03002666 unsigned long long power;
Chris Wilsona0371212019-01-14 14:21:14 +00002667 intel_wakeref_t wakeref;
Jesse Barnesec013e72013-08-20 10:29:23 +01002668 u32 units;
2669
David Weinehall36cdd012016-08-22 13:59:31 +03002670 if (INTEL_GEN(dev_priv) < 6)
Jesse Barnesec013e72013-08-20 10:29:23 +01002671 return -ENODEV;
2672
Chris Wilsond4225a52019-01-14 14:21:23 +00002673 if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
Gabriel Krisman Bertazid38014e2017-07-26 02:30:16 -03002674 return -ENODEV;
Gabriel Krisman Bertazid38014e2017-07-26 02:30:16 -03002675
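	/*
	 * MSR_RAPL_POWER_UNIT bits 12:8 hold the Energy Status Units (ESU):
	 * one energy counter tick is 1/2^ESU joules, so
	 * uJ = ticks * 1000000 >> ESU.
	 */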
2676 units = (power & 0x1f00) >> 8;
Chris Wilsond4225a52019-01-14 14:21:23 +00002677 with_intel_runtime_pm(dev_priv, wakeref)
2678 power = I915_READ(MCH_SECP_NRG_STTS);
2679
Gabriel Krisman Bertazid38014e2017-07-26 02:30:16 -03002680 power = (1000000 * power) >> units; /* convert to uJ */
Gabriel Krisman Bertazid38014e2017-07-26 02:30:16 -03002681 seq_printf(m, "%llu", power);
Paulo Zanoni371db662013-08-19 13:18:10 -03002682
2683 return 0;
2684}
2685
Damien Lespiau6455c872015-06-04 18:23:57 +01002686static int i915_runtime_pm_status(struct seq_file *m, void *unused)
Paulo Zanoni371db662013-08-19 13:18:10 -03002687{
David Weinehall36cdd012016-08-22 13:59:31 +03002688 struct drm_i915_private *dev_priv = node_to_i915(m->private);
David Weinehall52a05c32016-08-22 13:32:44 +03002689 struct pci_dev *pdev = dev_priv->drm.pdev;
Paulo Zanoni371db662013-08-19 13:18:10 -03002690
Chris Wilsona156e642016-04-03 14:14:21 +01002691 if (!HAS_RUNTIME_PM(dev_priv))
2692 seq_puts(m, "Runtime power management not supported\n");
Paulo Zanoni371db662013-08-19 13:18:10 -03002693
Chris Wilson25c896bd2019-01-14 14:21:25 +00002694 seq_printf(m, "Runtime power status: %s\n",
2695 enableddisabled(!dev_priv->power_domains.wakeref));
2696
Chris Wilson6f561032018-01-24 11:36:07 +00002697 seq_printf(m, "GPU idle: %s (epoch %u)\n",
2698 yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
Paulo Zanoni371db662013-08-19 13:18:10 -03002699 seq_printf(m, "IRQs disabled: %s\n",
Jesse Barnes9df7575f2014-06-20 09:29:20 -07002700 yesno(!intel_irqs_enabled(dev_priv)));
Chris Wilson0d804182015-06-15 12:52:28 +01002701#ifdef CONFIG_PM
Damien Lespiaua6aaec82015-06-04 18:23:58 +01002702 seq_printf(m, "Usage count: %d\n",
David Weinehall36cdd012016-08-22 13:59:31 +03002703 atomic_read(&dev_priv->drm.dev->power.usage_count));
Chris Wilson0d804182015-06-15 12:52:28 +01002704#else
2705 seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2706#endif
Chris Wilsona156e642016-04-03 14:14:21 +01002707 seq_printf(m, "PCI device power state: %s [%d]\n",
David Weinehall52a05c32016-08-22 13:32:44 +03002708 pci_power_name(pdev->current_state),
2709 pdev->current_state);
Paulo Zanoni371db662013-08-19 13:18:10 -03002710
Chris Wilsonbd780f32019-01-14 14:21:09 +00002711 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
2712 struct drm_printer p = drm_seq_file_printer(m);
2713
2714 print_intel_runtime_pm_wakeref(dev_priv, &p);
2715 }
2716
Jesse Barnesec013e72013-08-20 10:29:23 +01002717 return 0;
2718}
2719
Imre Deak1da51582013-11-25 17:15:35 +02002720static int i915_power_domain_info(struct seq_file *m, void *unused)
2721{
David Weinehall36cdd012016-08-22 13:59:31 +03002722 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Imre Deak1da51582013-11-25 17:15:35 +02002723 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2724 int i;
2725
2726 mutex_lock(&power_domains->lock);
2727
2728 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2729 for (i = 0; i < power_domains->power_well_count; i++) {
2730 struct i915_power_well *power_well;
2731 enum intel_display_power_domain power_domain;
2732
2733 power_well = &power_domains->power_wells[i];
Imre Deakf28ec6f2018-08-06 12:58:37 +03002734 seq_printf(m, "%-25s %d\n", power_well->desc->name,
Imre Deak1da51582013-11-25 17:15:35 +02002735 power_well->count);
2736
Imre Deakf28ec6f2018-08-06 12:58:37 +03002737 for_each_power_domain(power_domain, power_well->desc->domains)
Imre Deak1da51582013-11-25 17:15:35 +02002738 seq_printf(m, " %-23s %d\n",
Daniel Stone9895ad02015-11-20 15:55:33 +00002739 intel_display_power_domain_str(power_domain),
Imre Deak1da51582013-11-25 17:15:35 +02002740 power_domains->domain_use_count[power_domain]);
Imre Deak1da51582013-11-25 17:15:35 +02002741 }
2742
2743 mutex_unlock(&power_domains->lock);
2744
2745 return 0;
2746}
2747
Damien Lespiaub7cec662015-10-27 14:47:01 +02002748static int i915_dmc_info(struct seq_file *m, void *unused)
2749{
David Weinehall36cdd012016-08-22 13:59:31 +03002750 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00002751 intel_wakeref_t wakeref;
Damien Lespiaub7cec662015-10-27 14:47:01 +02002752 struct intel_csr *csr;
2753
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002754 if (!HAS_CSR(dev_priv))
2755 return -ENODEV;
Damien Lespiaub7cec662015-10-27 14:47:01 +02002756
2757 csr = &dev_priv->csr;
2758
Chris Wilsona0371212019-01-14 14:21:14 +00002759 wakeref = intel_runtime_pm_get(dev_priv);
Mika Kuoppala6fb403d2015-10-30 17:54:47 +02002760
Damien Lespiaub7cec662015-10-27 14:47:01 +02002761 seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2762 seq_printf(m, "path: %s\n", csr->fw_path);
2763
2764 if (!csr->dmc_payload)
Mika Kuoppala6fb403d2015-10-30 17:54:47 +02002765 goto out;
Damien Lespiaub7cec662015-10-27 14:47:01 +02002766
2767 seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2768 CSR_VERSION_MINOR(csr->version));
2769
Imre Deak34b2f8d2018-10-31 22:02:20 +02002770 if (WARN_ON(INTEL_GEN(dev_priv) > 11))
2771 goto out;
2772
2773 seq_printf(m, "DC3 -> DC5 count: %d\n",
2774 I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
2775 SKL_CSR_DC3_DC5_COUNT));
2776 if (!IS_GEN9_LP(dev_priv))
Damien Lespiau83372062015-10-30 17:53:32 +02002777 seq_printf(m, "DC5 -> DC6 count: %d\n",
2778 I915_READ(SKL_CSR_DC5_DC6_COUNT));
Damien Lespiau83372062015-10-30 17:53:32 +02002779
Mika Kuoppala6fb403d2015-10-30 17:54:47 +02002780out:
2781 seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2782 seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2783 seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2784
Chris Wilsona0371212019-01-14 14:21:14 +00002785 intel_runtime_pm_put(dev_priv, wakeref);
Damien Lespiau83372062015-10-30 17:53:32 +02002786
Damien Lespiaub7cec662015-10-27 14:47:01 +02002787 return 0;
2788}
2789
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002790static void intel_seq_print_mode(struct seq_file *m, int tabs,
2791 struct drm_display_mode *mode)
2792{
2793 int i;
2794
2795 for (i = 0; i < tabs; i++)
2796 seq_putc(m, '\t');
2797
2798 seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2799 mode->base.id, mode->name,
2800 mode->vrefresh, mode->clock,
2801 mode->hdisplay, mode->hsync_start,
2802 mode->hsync_end, mode->htotal,
2803 mode->vdisplay, mode->vsync_start,
2804 mode->vsync_end, mode->vtotal,
2805 mode->type, mode->flags);
2806}
2807
2808static void intel_encoder_info(struct seq_file *m,
2809 struct intel_crtc *intel_crtc,
2810 struct intel_encoder *intel_encoder)
2811{
David Weinehall36cdd012016-08-22 13:59:31 +03002812 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2813 struct drm_device *dev = &dev_priv->drm;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002814 struct drm_crtc *crtc = &intel_crtc->base;
2815 struct intel_connector *intel_connector;
2816 struct drm_encoder *encoder;
2817
2818 encoder = &intel_encoder->base;
2819 seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
Jani Nikula8e329a032014-06-03 14:56:21 +03002820 encoder->base.id, encoder->name);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002821 for_each_connector_on_encoder(dev, encoder, intel_connector) {
2822 struct drm_connector *connector = &intel_connector->base;
2823 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2824 connector->base.id,
Jani Nikulac23cc412014-06-03 14:56:17 +03002825 connector->name,
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002826 drm_get_connector_status_name(connector->status));
2827 if (connector->status == connector_status_connected) {
2828 struct drm_display_mode *mode = &crtc->mode;
2829 seq_printf(m, ", mode:\n");
2830 intel_seq_print_mode(m, 2, mode);
2831 } else {
2832 seq_putc(m, '\n');
2833 }
2834 }
2835}
2836
2837static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2838{
David Weinehall36cdd012016-08-22 13:59:31 +03002839 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2840 struct drm_device *dev = &dev_priv->drm;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002841 struct drm_crtc *crtc = &intel_crtc->base;
2842 struct intel_encoder *intel_encoder;
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02002843 struct drm_plane_state *plane_state = crtc->primary->state;
2844 struct drm_framebuffer *fb = plane_state->fb;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002845
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02002846 if (fb)
Matt Roper5aa8a932014-06-16 10:12:55 -07002847 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02002848 fb->base.id, plane_state->src_x >> 16,
2849 plane_state->src_y >> 16, fb->width, fb->height);
Matt Roper5aa8a932014-06-16 10:12:55 -07002850 else
2851 seq_puts(m, "\tprimary plane disabled\n");
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002852 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2853 intel_encoder_info(m, intel_crtc, intel_encoder);
2854}
2855
2856static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2857{
2858 struct drm_display_mode *mode = panel->fixed_mode;
2859
2860 seq_printf(m, "\tfixed mode:\n");
2861 intel_seq_print_mode(m, 2, mode);
2862}
2863
2864static void intel_dp_info(struct seq_file *m,
2865 struct intel_connector *intel_connector)
2866{
2867 struct intel_encoder *intel_encoder = intel_connector->encoder;
2868 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2869
2870 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
Jani Nikula742f4912015-09-03 11:16:09 +03002871 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02002872 if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002873 intel_panel_info(m, &intel_connector->panel);
Mika Kahola80209e52016-09-09 14:10:57 +03002874
2875 drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2876 &intel_dp->aux);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002877}
2878
Libin Yang9a148a92016-11-28 20:07:05 +08002879static void intel_dp_mst_info(struct seq_file *m,
2880 struct intel_connector *intel_connector)
2881{
2882 struct intel_encoder *intel_encoder = intel_connector->encoder;
2883 struct intel_dp_mst_encoder *intel_mst =
2884 enc_to_mst(&intel_encoder->base);
2885 struct intel_digital_port *intel_dig_port = intel_mst->primary;
2886 struct intel_dp *intel_dp = &intel_dig_port->dp;
2887 bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2888 intel_connector->port);
2889
2890 seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2891}
2892
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002893static void intel_hdmi_info(struct seq_file *m,
2894 struct intel_connector *intel_connector)
2895{
2896 struct intel_encoder *intel_encoder = intel_connector->encoder;
2897 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2898
Jani Nikula742f4912015-09-03 11:16:09 +03002899 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002900}
2901
2902static void intel_lvds_info(struct seq_file *m,
2903 struct intel_connector *intel_connector)
2904{
2905 intel_panel_info(m, &intel_connector->panel);
2906}
2907
2908static void intel_connector_info(struct seq_file *m,
2909 struct drm_connector *connector)
2910{
2911 struct intel_connector *intel_connector = to_intel_connector(connector);
2912 struct intel_encoder *intel_encoder = intel_connector->encoder;
Jesse Barnesf103fc72014-02-20 12:39:57 -08002913 struct drm_display_mode *mode;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002914
2915 seq_printf(m, "connector %d: type %s, status: %s\n",
Jani Nikulac23cc412014-06-03 14:56:17 +03002916 connector->base.id, connector->name,
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002917 drm_get_connector_status_name(connector->status));
José Roberto de Souza3e037f92018-10-30 14:57:46 -07002918
2919 if (connector->status == connector_status_disconnected)
2920 return;
2921
2922 seq_printf(m, "\tname: %s\n", connector->display_info.name);
2923 seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2924 connector->display_info.width_mm,
2925 connector->display_info.height_mm);
2926 seq_printf(m, "\tsubpixel order: %s\n",
2927 drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2928 seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
Maarten Lankhorstee648a72016-06-21 12:00:38 +02002929
Maarten Lankhorst77d1f612017-06-26 10:33:49 +02002930 if (!intel_encoder)
Maarten Lankhorstee648a72016-06-21 12:00:38 +02002931 return;
2932
2933 switch (connector->connector_type) {
2934 case DRM_MODE_CONNECTOR_DisplayPort:
2935 case DRM_MODE_CONNECTOR_eDP:
Libin Yang9a148a92016-11-28 20:07:05 +08002936 if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
2937 intel_dp_mst_info(m, intel_connector);
2938 else
2939 intel_dp_info(m, intel_connector);
Maarten Lankhorstee648a72016-06-21 12:00:38 +02002940 break;
2941 case DRM_MODE_CONNECTOR_LVDS:
2942 if (intel_encoder->type == INTEL_OUTPUT_LVDS)
Dave Airlie36cd7442014-05-02 13:44:18 +10002943 intel_lvds_info(m, intel_connector);
Maarten Lankhorstee648a72016-06-21 12:00:38 +02002944 break;
2945 case DRM_MODE_CONNECTOR_HDMIA:
2946 if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
Ville Syrjälä7e732ca2017-10-27 22:31:24 +03002947 intel_encoder->type == INTEL_OUTPUT_DDI)
Maarten Lankhorstee648a72016-06-21 12:00:38 +02002948 intel_hdmi_info(m, intel_connector);
2949 break;
2950 default:
2951 break;
Dave Airlie36cd7442014-05-02 13:44:18 +10002952 }
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002953
Jesse Barnesf103fc72014-02-20 12:39:57 -08002954 seq_printf(m, "\tmodes:\n");
2955 list_for_each_entry(mode, &connector->modes, head)
2956 intel_seq_print_mode(m, 2, mode);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002957}
2958
Robert Fekete3abc4e02015-10-27 16:58:32 +01002959static const char *plane_type(enum drm_plane_type type)
2960{
2961 switch (type) {
2962 case DRM_PLANE_TYPE_OVERLAY:
2963 return "OVL";
2964 case DRM_PLANE_TYPE_PRIMARY:
2965 return "PRI";
2966 case DRM_PLANE_TYPE_CURSOR:
2967 return "CUR";
2968 /*
2969 * Deliberately omitting default: to generate compiler warnings
2970 * when a new drm_plane_type gets added.
2971 */
2972 }
2973
2974 return "unknown";
2975}
2976
Jani Nikula5852a152019-01-07 16:51:49 +02002977static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
Robert Fekete3abc4e02015-10-27 16:58:32 +01002978{
Robert Fekete3abc4e02015-10-27 16:58:32 +01002979 /*
Robert Fossc2c446a2017-05-19 16:50:17 -04002980 * According to the docs only one DRM_MODE_ROTATE_ value is allowed,
Robert Fekete3abc4e02015-10-27 16:58:32 +01002981 * but print them all so misused values are easy to spot
2982 */
Jani Nikula5852a152019-01-07 16:51:49 +02002983 snprintf(buf, bufsize,
Robert Fekete3abc4e02015-10-27 16:58:32 +01002984 "%s%s%s%s%s%s(0x%08x)",
Robert Fossc2c446a2017-05-19 16:50:17 -04002985 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
2986 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
2987 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
2988 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
2989 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
2990 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
Robert Fekete3abc4e02015-10-27 16:58:32 +01002991 rotation);
Robert Fekete3abc4e02015-10-27 16:58:32 +01002992}
2993
2994static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2995{
David Weinehall36cdd012016-08-22 13:59:31 +03002996 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2997 struct drm_device *dev = &dev_priv->drm;
Robert Fekete3abc4e02015-10-27 16:58:32 +01002998 struct intel_plane *intel_plane;
2999
3000 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3001 struct drm_plane_state *state;
3002 struct drm_plane *plane = &intel_plane->base;
Eric Engestromb3c11ac2016-11-12 01:12:56 +00003003 struct drm_format_name_buf format_name;
Jani Nikula5852a152019-01-07 16:51:49 +02003004 char rot_str[48];
Robert Fekete3abc4e02015-10-27 16:58:32 +01003005
3006 if (!plane->state) {
3007 seq_puts(m, "plane->state is NULL!\n");
3008 continue;
3009 }
3010
3011 state = plane->state;
3012
Eric Engestrom90844f02016-08-15 01:02:38 +01003013 if (state->fb) {
Ville Syrjälä438b74a2016-12-14 23:32:55 +02003014 drm_get_format_name(state->fb->format->format,
3015 &format_name);
Eric Engestrom90844f02016-08-15 01:02:38 +01003016 } else {
Eric Engestromb3c11ac2016-11-12 01:12:56 +00003017 sprintf(format_name.str, "N/A");
Eric Engestrom90844f02016-08-15 01:02:38 +01003018 }
3019
Jani Nikula5852a152019-01-07 16:51:49 +02003020 plane_rotation(rot_str, sizeof(rot_str), state->rotation);
3021
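		/*
		 * src_* coordinates are 16.16 fixed point; the fraction is
		 * shown as a decimal via frac * 15625 >> 10, which equals
		 * frac * 1000000 / 65536.
		 */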
Robert Fekete3abc4e02015-10-27 16:58:32 +01003022 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
3023 plane->base.id,
3024 plane_type(intel_plane->base.type),
3025 state->crtc_x, state->crtc_y,
3026 state->crtc_w, state->crtc_h,
3027 (state->src_x >> 16),
3028 ((state->src_x & 0xffff) * 15625) >> 10,
3029 (state->src_y >> 16),
3030 ((state->src_y & 0xffff) * 15625) >> 10,
3031 (state->src_w >> 16),
3032 ((state->src_w & 0xffff) * 15625) >> 10,
3033 (state->src_h >> 16),
3034 ((state->src_h & 0xffff) * 15625) >> 10,
Eric Engestromb3c11ac2016-11-12 01:12:56 +00003035 format_name.str,
Jani Nikula5852a152019-01-07 16:51:49 +02003036 rot_str);
Robert Fekete3abc4e02015-10-27 16:58:32 +01003037 }
3038}
3039
3040static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3041{
3042 struct intel_crtc_state *pipe_config;
3043 int num_scalers = intel_crtc->num_scalers;
3044 int i;
3045
3046 pipe_config = to_intel_crtc_state(intel_crtc->base.state);
3047
3048 /* Not all platforms have a scaler */
3049 if (num_scalers) {
3050 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x, scaler_id=%d",
3051 num_scalers,
3052 pipe_config->scaler_state.scaler_users,
3053 pipe_config->scaler_state.scaler_id);
3054
A.Sunil Kamath58415912016-11-20 23:20:26 +05303055 for (i = 0; i < num_scalers; i++) {
Robert Fekete3abc4e02015-10-27 16:58:32 +01003056 struct intel_scaler *sc =
3057 &pipe_config->scaler_state.scalers[i];
3058
3059 seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
3060 i, yesno(sc->in_use), sc->mode);
3061 }
3062 seq_puts(m, "\n");
3063 } else {
3064 seq_puts(m, "\tNo scalers available on this platform\n");
3065 }
3066}
3067
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003068static int i915_display_info(struct seq_file *m, void *unused)
3069{
David Weinehall36cdd012016-08-22 13:59:31 +03003070 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3071 struct drm_device *dev = &dev_priv->drm;
Chris Wilson065f2ec22014-03-12 09:13:13 +00003072 struct intel_crtc *crtc;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003073 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003074 struct drm_connector_list_iter conn_iter;
Chris Wilsona0371212019-01-14 14:21:14 +00003075 intel_wakeref_t wakeref;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003076
Chris Wilsona0371212019-01-14 14:21:14 +00003077 wakeref = intel_runtime_pm_get(dev_priv);
3078
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003079 seq_printf(m, "CRTC info\n");
3080 seq_printf(m, "---------\n");
Damien Lespiaud3fcc802014-05-13 23:32:22 +01003081 for_each_intel_crtc(dev, crtc) {
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003082 struct intel_crtc_state *pipe_config;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003083
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003084 drm_modeset_lock(&crtc->base.mutex, NULL);
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003085 pipe_config = to_intel_crtc_state(crtc->base.state);
3086
Robert Fekete3abc4e02015-10-27 16:58:32 +01003087 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
Chris Wilson065f2ec22014-03-12 09:13:13 +00003088 crtc->base.base.id, pipe_name(crtc->pipe),
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003089 yesno(pipe_config->base.active),
Robert Fekete3abc4e02015-10-27 16:58:32 +01003090 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3091 yesno(pipe_config->dither), pipe_config->pipe_bpp);
3092
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003093 if (pipe_config->base.active) {
Ville Syrjäläcd5dcbf2017-03-27 21:55:35 +03003094 struct intel_plane *cursor =
3095 to_intel_plane(crtc->base.cursor);
3096
Chris Wilson065f2ec22014-03-12 09:13:13 +00003097 intel_crtc_info(m, crtc);
3098
Ville Syrjäläcd5dcbf2017-03-27 21:55:35 +03003099 seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
3100 yesno(cursor->base.state->visible),
3101 cursor->base.state->crtc_x,
3102 cursor->base.state->crtc_y,
3103 cursor->base.state->crtc_w,
3104 cursor->base.state->crtc_h,
3105 cursor->cursor.base);
Robert Fekete3abc4e02015-10-27 16:58:32 +01003106 intel_scaler_info(m, crtc);
3107 intel_plane_info(m, crtc);
Paulo Zanonia23dc652014-04-01 14:55:11 -03003108 }
Daniel Vettercace8412014-05-22 17:56:31 +02003109
3110 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
3111 yesno(!crtc->cpu_fifo_underrun_disabled),
3112 yesno(!crtc->pch_fifo_underrun_disabled));
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003113 drm_modeset_unlock(&crtc->base.mutex);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003114 }
3115
3116 seq_printf(m, "\n");
3117 seq_printf(m, "Connector info\n");
3118 seq_printf(m, "--------------\n");
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003119 mutex_lock(&dev->mode_config.mutex);
3120 drm_connector_list_iter_begin(dev, &conn_iter);
3121 drm_for_each_connector_iter(connector, &conn_iter)
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003122 intel_connector_info(m, connector);
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003123 drm_connector_list_iter_end(&conn_iter);
3124 mutex_unlock(&dev->mode_config.mutex);
3125
Chris Wilsona0371212019-01-14 14:21:14 +00003126 intel_runtime_pm_put(dev_priv, wakeref);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003127
3128 return 0;
3129}
3130
Chris Wilson1b365952016-10-04 21:11:31 +01003131static int i915_engine_info(struct seq_file *m, void *unused)
3132{
3133 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3134 struct intel_engine_cs *engine;
Chris Wilsona0371212019-01-14 14:21:14 +00003135 intel_wakeref_t wakeref;
Akash Goel3b3f1652016-10-13 22:44:48 +05303136 enum intel_engine_id id;
Chris Wilsonf636edb2017-10-09 12:02:57 +01003137 struct drm_printer p;
Chris Wilson1b365952016-10-04 21:11:31 +01003138
Chris Wilsona0371212019-01-14 14:21:14 +00003139 wakeref = intel_runtime_pm_get(dev_priv);
Chris Wilson9c870d02016-10-24 13:42:15 +01003140
Chris Wilson6f561032018-01-24 11:36:07 +00003141 seq_printf(m, "GT awake? %s (epoch %u)\n",
3142 yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
Chris Wilsonf73b5672017-03-02 15:03:56 +00003143 seq_printf(m, "Global active requests: %d\n",
3144 dev_priv->gt.active_requests);
Lionel Landwerlinf577a032017-11-13 23:34:53 +00003145 seq_printf(m, "CS timestamp frequency: %u kHz\n",
Jani Nikula02584042018-12-31 16:56:41 +02003146 RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
Chris Wilsonf73b5672017-03-02 15:03:56 +00003147
Chris Wilsonf636edb2017-10-09 12:02:57 +01003148 p = drm_seq_file_printer(m);
3149 for_each_engine(engine, dev_priv, id)
Chris Wilson0db18b12017-12-08 01:23:00 +00003150 intel_engine_dump(engine, &p, "%s\n", engine->name);
Chris Wilson1b365952016-10-04 21:11:31 +01003151
Chris Wilsona0371212019-01-14 14:21:14 +00003152 intel_runtime_pm_put(dev_priv, wakeref);
Chris Wilson9c870d02016-10-24 13:42:15 +01003153
Chris Wilson1b365952016-10-04 21:11:31 +01003154 return 0;
3155}
3156
Lionel Landwerlin79e9cd52018-03-06 12:28:54 +00003157static int i915_rcs_topology(struct seq_file *m, void *unused)
3158{
3159 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3160 struct drm_printer p = drm_seq_file_printer(m);
3161
Jani Nikula02584042018-12-31 16:56:41 +02003162 intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
Lionel Landwerlin79e9cd52018-03-06 12:28:54 +00003163
3164 return 0;
3165}
3166
Chris Wilsonc5418a82017-10-13 21:26:19 +01003167static int i915_shrinker_info(struct seq_file *m, void *unused)
3168{
3169 struct drm_i915_private *i915 = node_to_i915(m->private);
3170
3171 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3172 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3173
3174 return 0;
3175}
3176
Daniel Vetter728e29d2014-06-25 22:01:53 +03003177static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3178{
David Weinehall36cdd012016-08-22 13:59:31 +03003179 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3180 struct drm_device *dev = &dev_priv->drm;
Daniel Vetter728e29d2014-06-25 22:01:53 +03003181 int i;
3182
3183 drm_modeset_lock_all(dev);
3184 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3185 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
3186
Lucas De Marchi72f775f2018-03-20 15:06:34 -07003187 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
Lucas De Marchi0823eb92018-03-20 15:06:35 -07003188 pll->info->id);
Maarten Lankhorst2dd66ebd2016-03-14 09:27:52 +01003189 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
Ander Conselvan de Oliveira2c42e532016-12-29 17:22:09 +02003190 pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
Daniel Vetter728e29d2014-06-25 22:01:53 +03003191 seq_printf(m, " tracked hardware state:\n");
Ander Conselvan de Oliveira2c42e532016-12-29 17:22:09 +02003192 seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
Ander Conselvan de Oliveira3e369b72014-10-29 11:32:32 +02003193 seq_printf(m, " dpll_md: 0x%08x\n",
Ander Conselvan de Oliveira2c42e532016-12-29 17:22:09 +02003194 pll->state.hw_state.dpll_md);
3195 seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
3196 seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
3197 seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
Paulo Zanonic27e9172018-04-27 16:14:36 -07003198 seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
3199 seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
3200 seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
3201 pll->state.hw_state.mg_refclkin_ctl);
3202 seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
3203 pll->state.hw_state.mg_clktop2_coreclkctl1);
3204 seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
3205 pll->state.hw_state.mg_clktop2_hsclkctl);
3206 seq_printf(m, " mg_pll_div0: 0x%08x\n",
3207 pll->state.hw_state.mg_pll_div0);
3208 seq_printf(m, " mg_pll_div1: 0x%08x\n",
3209 pll->state.hw_state.mg_pll_div1);
3210 seq_printf(m, " mg_pll_lf: 0x%08x\n",
3211 pll->state.hw_state.mg_pll_lf);
3212 seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
3213 pll->state.hw_state.mg_pll_frac_lock);
3214 seq_printf(m, " mg_pll_ssc: 0x%08x\n",
3215 pll->state.hw_state.mg_pll_ssc);
3216 seq_printf(m, " mg_pll_bias: 0x%08x\n",
3217 pll->state.hw_state.mg_pll_bias);
3218 seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
3219 pll->state.hw_state.mg_pll_tdc_coldst_bias);
Daniel Vetter728e29d2014-06-25 22:01:53 +03003220 }
3221 drm_modeset_unlock_all(dev);
3222
3223 return 0;
3224}
3225
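/*
 * List the context workarounds applied to the render engine as
 * (register, value, mask) triplets.
 */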
Damien Lespiau1ed1ef92014-08-30 16:50:59 +01003226static int i915_wa_registers(struct seq_file *m, void *unused)
Arun Siluvery888b5992014-08-26 14:44:51 +01003227{
Tvrtko Ursulin452420d2018-12-03 13:33:57 +00003228 struct drm_i915_private *i915 = node_to_i915(m->private);
3229 const struct i915_wa_list *wal = &i915->engine[RCS]->ctx_wa_list;
3230 struct i915_wa *wa;
3231 unsigned int i;
Arun Siluvery888b5992014-08-26 14:44:51 +01003232
Tvrtko Ursulin452420d2018-12-03 13:33:57 +00003233 seq_printf(m, "Workarounds applied: %u\n", wal->count);
3234 for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
Chris Wilson548764b2018-06-15 13:02:07 +01003235 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
Tvrtko Ursulin452420d2018-12-03 13:33:57 +00003236 i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);
Arun Siluvery888b5992014-08-26 14:44:51 +01003237
3238 return 0;
3239}
3240
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05303241static int i915_ipc_status_show(struct seq_file *m, void *data)
3242{
3243 struct drm_i915_private *dev_priv = m->private;
3244
3245 seq_printf(m, "Isochronous Priority Control: %s\n",
3246 yesno(dev_priv->ipc_enabled));
3247 return 0;
3248}
3249
3250static int i915_ipc_status_open(struct inode *inode, struct file *file)
3251{
3252 struct drm_i915_private *dev_priv = inode->i_private;
3253
3254 if (!HAS_IPC(dev_priv))
3255 return -ENODEV;
3256
3257 return single_open(file, i915_ipc_status_show, dev_priv);
3258}
3259
3260static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
3261 size_t len, loff_t *offp)
3262{
3263 struct seq_file *m = file->private_data;
3264 struct drm_i915_private *dev_priv = m->private;
Chris Wilsona0371212019-01-14 14:21:14 +00003265 intel_wakeref_t wakeref;
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05303266 bool enable;
Chris Wilsond4225a52019-01-14 14:21:23 +00003267 int ret;
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05303268
3269 ret = kstrtobool_from_user(ubuf, len, &enable);
3270 if (ret < 0)
3271 return ret;
3272
Chris Wilsond4225a52019-01-14 14:21:23 +00003273 with_intel_runtime_pm(dev_priv, wakeref) {
3274 if (!dev_priv->ipc_enabled && enable)
3275 DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
3276 dev_priv->wm.distrust_bios_wm = true;
3277 dev_priv->ipc_enabled = enable;
3278 intel_enable_ipc(dev_priv);
3279 }
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05303280
3281 return len;
3282}
3283
3284static const struct file_operations i915_ipc_status_fops = {
3285 .owner = THIS_MODULE,
3286 .open = i915_ipc_status_open,
3287 .read = seq_read,
3288 .llseek = seq_lseek,
3289 .release = single_release,
3290 .write = i915_ipc_status_write
3291};
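/*
 * Minimal usage sketch (the debugfs path is an assumption, it depends
 * on the card number):
 *	cat /sys/kernel/debug/dri/0/i915_ipc_status
 *	echo 1 > /sys/kernel/debug/dri/0/i915_ipc_status
 */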
3292
Damien Lespiauc5511e42014-11-04 17:06:51 +00003293static int i915_ddb_info(struct seq_file *m, void *unused)
3294{
David Weinehall36cdd012016-08-22 13:59:31 +03003295 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3296 struct drm_device *dev = &dev_priv->drm;
Damien Lespiauc5511e42014-11-04 17:06:51 +00003297 struct skl_ddb_entry *entry;
Ville Syrjäläff43bc32018-11-27 18:59:00 +02003298 struct intel_crtc *crtc;
Damien Lespiauc5511e42014-11-04 17:06:51 +00003299
David Weinehall36cdd012016-08-22 13:59:31 +03003300 if (INTEL_GEN(dev_priv) < 9)
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00003301 return -ENODEV;
Damien Lespiau2fcffe12014-12-03 17:33:24 +00003302
Damien Lespiauc5511e42014-11-04 17:06:51 +00003303 drm_modeset_lock_all(dev);
3304
Damien Lespiauc5511e42014-11-04 17:06:51 +00003305 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3306
Ville Syrjäläff43bc32018-11-27 18:59:00 +02003307 for_each_intel_crtc(&dev_priv->drm, crtc) {
3308 struct intel_crtc_state *crtc_state =
3309 to_intel_crtc_state(crtc->base.state);
3310 enum pipe pipe = crtc->pipe;
3311 enum plane_id plane_id;
3312
Damien Lespiauc5511e42014-11-04 17:06:51 +00003313 seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3314
Ville Syrjäläff43bc32018-11-27 18:59:00 +02003315 for_each_plane_id_on_crtc(crtc, plane_id) {
3316 entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
3317 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
Damien Lespiauc5511e42014-11-04 17:06:51 +00003318 entry->start, entry->end,
3319 skl_ddb_entry_size(entry));
3320 }
3321
Ville Syrjäläff43bc32018-11-27 18:59:00 +02003322 entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
Damien Lespiauc5511e42014-11-04 17:06:51 +00003323 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
3324 entry->end, skl_ddb_entry_size(entry));
3325 }
3326
3327 drm_modeset_unlock_all(dev);
3328
3329 return 0;
3330}
3331
Vandana Kannana54746e2015-03-03 20:53:10 +05303332static void drrs_status_per_crtc(struct seq_file *m,
David Weinehall36cdd012016-08-22 13:59:31 +03003333 struct drm_device *dev,
3334 struct intel_crtc *intel_crtc)
Vandana Kannana54746e2015-03-03 20:53:10 +05303335{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003336 struct drm_i915_private *dev_priv = to_i915(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303337 struct i915_drrs *drrs = &dev_priv->drrs;
3338 int vrefresh = 0;
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003339 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003340 struct drm_connector_list_iter conn_iter;
Vandana Kannana54746e2015-03-03 20:53:10 +05303341
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003342 drm_connector_list_iter_begin(dev, &conn_iter);
3343 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003344 if (connector->state->crtc != &intel_crtc->base)
3345 continue;
3346
3347 seq_printf(m, "%s:\n", connector->name);
Vandana Kannana54746e2015-03-03 20:53:10 +05303348 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003349 drm_connector_list_iter_end(&conn_iter);
Vandana Kannana54746e2015-03-03 20:53:10 +05303350
3351 if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
3352 seq_puts(m, "\tVBT: DRRS_type: Static");
3353 else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
3354 seq_puts(m, "\tVBT: DRRS_type: Seamless");
3355 else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
3356 seq_puts(m, "\tVBT: DRRS_type: None");
3357 else
3358 seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
3359
3360 seq_puts(m, "\n\n");
3361
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003362 if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
Vandana Kannana54746e2015-03-03 20:53:10 +05303363 struct intel_panel *panel;
3364
3365 mutex_lock(&drrs->mutex);
3366 /* DRRS Supported */
3367 seq_puts(m, "\tDRRS Supported: Yes\n");
3368
3369 /* disable_drrs() will make drrs->dp NULL */
3370 if (!drrs->dp) {
C, Ramalingamce6e2132017-11-20 09:53:47 +05303371 seq_puts(m, "Idleness DRRS: Disabled\n");
3372 if (dev_priv->psr.enabled)
3373 seq_puts(m,
3374 "\tAs PSR is enabled, DRRS is not enabled\n");
Vandana Kannana54746e2015-03-03 20:53:10 +05303375 mutex_unlock(&drrs->mutex);
3376 return;
3377 }
3378
3379 panel = &drrs->dp->attached_connector->panel;
3380 seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
3381 drrs->busy_frontbuffer_bits);
3382
3383 seq_puts(m, "\n\t\t");
3384 if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
3385 seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
3386 vrefresh = panel->fixed_mode->vrefresh;
3387 } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
3388 seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
3389 vrefresh = panel->downclock_mode->vrefresh;
3390 } else {
3391 seq_printf(m, "DRRS_State: Unknown(%d)\n",
3392 drrs->refresh_rate_type);
3393 mutex_unlock(&drrs->mutex);
3394 return;
3395 }
3396 seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3397
3398 seq_puts(m, "\n\t\t");
3399 mutex_unlock(&drrs->mutex);
3400 } else {
3401 /* DRRS not supported. Print the VBT parameter */
3402 seq_puts(m, "\tDRRS Supported: No");
3403 }
3404 seq_puts(m, "\n");
3405}
3406
3407static int i915_drrs_status(struct seq_file *m, void *unused)
3408{
David Weinehall36cdd012016-08-22 13:59:31 +03003409 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3410 struct drm_device *dev = &dev_priv->drm;
Vandana Kannana54746e2015-03-03 20:53:10 +05303411 struct intel_crtc *intel_crtc;
3412 int active_crtc_cnt = 0;
3413
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003414 drm_modeset_lock_all(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303415 for_each_intel_crtc(dev, intel_crtc) {
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003416 if (intel_crtc->base.state->active) {
Vandana Kannana54746e2015-03-03 20:53:10 +05303417 active_crtc_cnt++;
3418 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
3419
3420 drrs_status_per_crtc(m, dev, intel_crtc);
3421 }
Vandana Kannana54746e2015-03-03 20:53:10 +05303422 }
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003423 drm_modeset_unlock_all(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303424
3425 if (!active_crtc_cnt)
3426 seq_puts(m, "No active crtc found\n");
3427
3428 return 0;
3429}
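
/*
 * Illustrative sketch (not part of the driver): because the report is
 * produced through seq_file, userspace can consume it with a plain
 * read(). The debugfs mount point and card minor 0 are assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[4096];
        ssize_t n;
        int fd;

        fd = open("/sys/kernel/debug/dri/0/i915_drrs_status", O_RDONLY);
        if (fd < 0)
                return 1;
        while ((n = read(fd, buf, sizeof(buf))) > 0)
                fwrite(buf, 1, n, stdout);
        close(fd);
        return 0;
}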
3430
Dave Airlie11bed952014-05-12 15:22:27 +10003431static int i915_dp_mst_info(struct seq_file *m, void *unused)
3432{
David Weinehall36cdd012016-08-22 13:59:31 +03003433 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3434 struct drm_device *dev = &dev_priv->drm;
Dave Airlie11bed952014-05-12 15:22:27 +10003435 struct intel_encoder *intel_encoder;
3436 struct intel_digital_port *intel_dig_port;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003437 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003438 struct drm_connector_list_iter conn_iter;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003439
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003440 drm_connector_list_iter_begin(dev, &conn_iter);
3441 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003442 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
Dave Airlie11bed952014-05-12 15:22:27 +10003443 continue;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003444
3445 intel_encoder = intel_attached_encoder(connector);
3446 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3447 continue;
3448
3449 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
Dave Airlie11bed952014-05-12 15:22:27 +10003450 if (!intel_dig_port->dp.can_mst)
3451 continue;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003452
Jim Bride40ae80c2016-04-14 10:18:37 -07003453 seq_printf(m, "MST Source Port %c\n",
Ville Syrjälä8f4f2792017-11-09 17:24:34 +02003454 port_name(intel_dig_port->base.port));
Dave Airlie11bed952014-05-12 15:22:27 +10003455 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3456 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003457 drm_connector_list_iter_end(&conn_iter);
3458
Dave Airlie11bed952014-05-12 15:22:27 +10003459 return 0;
3460}
3461
Todd Previteeb3394fa2015-04-18 00:04:19 -07003462static ssize_t i915_displayport_test_active_write(struct file *file,
David Weinehall36cdd012016-08-22 13:59:31 +03003463 const char __user *ubuf,
3464 size_t len, loff_t *offp)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003465{
3466 char *input_buffer;
3467 int status = 0;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003468 struct drm_device *dev;
3469 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003470 struct drm_connector_list_iter conn_iter;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003471 struct intel_dp *intel_dp;
3472 int val = 0;
3473
Sudip Mukherjee9aaffa32015-07-21 17:36:45 +05303474 dev = ((struct seq_file *)file->private_data)->private;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003475
Todd Previteeb3394fa2015-04-18 00:04:19 -07003476 if (len == 0)
3477 return 0;
3478
Geliang Tang261aeba2017-05-06 23:40:17 +08003479 input_buffer = memdup_user_nul(ubuf, len);
3480 if (IS_ERR(input_buffer))
3481 return PTR_ERR(input_buffer);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003482
Todd Previteeb3394fa2015-04-18 00:04:19 -07003483 DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
3484
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003485 drm_connector_list_iter_begin(dev, &conn_iter);
3486 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003487 struct intel_encoder *encoder;
3488
Todd Previteeb3394fa2015-04-18 00:04:19 -07003489 if (connector->connector_type !=
3490 DRM_MODE_CONNECTOR_DisplayPort)
3491 continue;
3492
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003493 encoder = to_intel_encoder(connector->encoder);
3494 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3495 continue;
3496
3497 if (encoder && connector->status == connector_status_connected) {
3498 intel_dp = enc_to_intel_dp(&encoder->base);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003499 status = kstrtoint(input_buffer, 10, &val);
3500 if (status < 0)
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003501 break;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003502 DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3503 /* To prevent erroneous activation of the compliance
3504 * testing code, only accept an actual value of 1 here
3505 */
3506 if (val == 1)
Manasi Navarec1617ab2016-12-09 16:22:50 -08003507 intel_dp->compliance.test_active = 1;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003508 else
Manasi Navarec1617ab2016-12-09 16:22:50 -08003509 intel_dp->compliance.test_active = 0;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003510 }
3511 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003512 drm_connector_list_iter_end(&conn_iter);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003513 kfree(input_buffer);
3514 if (status < 0)
3515 return status;
3516
3517 *offp += len;
3518 return len;
3519}
3520
3521static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3522{
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003523 struct drm_i915_private *dev_priv = m->private;
3524 struct drm_device *dev = &dev_priv->drm;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003525 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003526 struct drm_connector_list_iter conn_iter;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003527 struct intel_dp *intel_dp;
3528
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003529 drm_connector_list_iter_begin(dev, &conn_iter);
3530 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003531 struct intel_encoder *encoder;
3532
Todd Previteeb3394fa2015-04-18 00:04:19 -07003533 if (connector->connector_type !=
3534 DRM_MODE_CONNECTOR_DisplayPort)
3535 continue;
3536
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003537 encoder = to_intel_encoder(connector->encoder);
3538 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3539 continue;
3540
3541 if (encoder && connector->status == connector_status_connected) {
3542 intel_dp = enc_to_intel_dp(&encoder->base);
Manasi Navarec1617ab2016-12-09 16:22:50 -08003543 if (intel_dp->compliance.test_active)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003544 seq_puts(m, "1");
3545 else
3546 seq_puts(m, "0");
3547 } else
3548 seq_puts(m, "0");
3549 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003550 drm_connector_list_iter_end(&conn_iter);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003551
3552 return 0;
3553}
3554
3555static int i915_displayport_test_active_open(struct inode *inode,
David Weinehall36cdd012016-08-22 13:59:31 +03003556 struct file *file)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003557{
David Weinehall36cdd012016-08-22 13:59:31 +03003558 return single_open(file, i915_displayport_test_active_show,
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003559 inode->i_private);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003560}
3561
3562static const struct file_operations i915_displayport_test_active_fops = {
3563 .owner = THIS_MODULE,
3564 .open = i915_displayport_test_active_open,
3565 .read = seq_read,
3566 .llseek = seq_lseek,
3567 .release = single_release,
3568 .write = i915_displayport_test_active_write
3569};
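
/*
 * Illustrative sketch: arming DP compliance testing from userspace.
 * Per the write handler above, only the value 1 sets test_active;
 * anything else clears it. The "i915_dp_test_active" debugfs name and
 * path are assumptions based on how these fops are registered.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/sys/kernel/debug/dri/0/i915_dp_test_active",
                      O_WRONLY);

        if (fd < 0)
                return 1;
        dprintf(fd, "1\n");     /* arm compliance testing on connected DP */
        close(fd);
        return 0;
}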
3570
3571static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3572{
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003573 struct drm_i915_private *dev_priv = m->private;
3574 struct drm_device *dev = &dev_priv->drm;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003575 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003576 struct drm_connector_list_iter conn_iter;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003577 struct intel_dp *intel_dp;
3578
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003579 drm_connector_list_iter_begin(dev, &conn_iter);
3580 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003581 struct intel_encoder *encoder;
3582
Todd Previteeb3394fa2015-04-18 00:04:19 -07003583 if (connector->connector_type !=
3584 DRM_MODE_CONNECTOR_DisplayPort)
3585 continue;
3586
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003587 encoder = to_intel_encoder(connector->encoder);
3588 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3589 continue;
3590
3591 if (encoder && connector->status == connector_status_connected) {
3592 intel_dp = enc_to_intel_dp(&encoder->base);
Manasi Navareb48a5ba2017-01-20 19:09:28 -08003593 if (intel_dp->compliance.test_type ==
3594 DP_TEST_LINK_EDID_READ)
3595 seq_printf(m, "%lx",
3596 intel_dp->compliance.test_data.edid);
Manasi Navare611032b2017-01-24 08:21:49 -08003597 else if (intel_dp->compliance.test_type ==
3598 DP_TEST_LINK_VIDEO_PATTERN) {
3599 seq_printf(m, "hdisplay: %d\n",
3600 intel_dp->compliance.test_data.hdisplay);
3601 seq_printf(m, "vdisplay: %d\n",
3602 intel_dp->compliance.test_data.vdisplay);
3603 seq_printf(m, "bpc: %u\n",
3604 intel_dp->compliance.test_data.bpc);
3605 }
Todd Previteeb3394fa2015-04-18 00:04:19 -07003606 } else
3607 seq_puts(m, "0");
3608 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003609 drm_connector_list_iter_end(&conn_iter);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003610
3611 return 0;
3612}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003613DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003614
3615static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3616{
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003617 struct drm_i915_private *dev_priv = m->private;
3618 struct drm_device *dev = &dev_priv->drm;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003619 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003620 struct drm_connector_list_iter conn_iter;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003621 struct intel_dp *intel_dp;
3622
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003623 drm_connector_list_iter_begin(dev, &conn_iter);
3624 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003625 struct intel_encoder *encoder;
3626
Todd Previteeb3394fa2015-04-18 00:04:19 -07003627 if (connector->connector_type !=
3628 DRM_MODE_CONNECTOR_DisplayPort)
3629 continue;
3630
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003631 encoder = to_intel_encoder(connector->encoder);
3632 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3633 continue;
3634
3635 if (encoder && connector->status == connector_status_connected) {
3636 intel_dp = enc_to_intel_dp(&encoder->base);
Manasi Navarec1617ab2016-12-09 16:22:50 -08003637 seq_printf(m, "%02lx", intel_dp->compliance.test_type);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003638 } else
3639 seq_puts(m, "0");
3640 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003641 drm_connector_list_iter_end(&conn_iter);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003642
3643 return 0;
3644}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003645DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003646
Damien Lespiau97e94b22014-11-04 17:06:50 +00003647static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
Ville Syrjälä369a1342014-01-22 14:36:08 +02003648{
David Weinehall36cdd012016-08-22 13:59:31 +03003649 struct drm_i915_private *dev_priv = m->private;
3650 struct drm_device *dev = &dev_priv->drm;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003651 int level;
Ville Syrjäläde38b952015-06-24 22:00:09 +03003652 int num_levels;
3653
David Weinehall36cdd012016-08-22 13:59:31 +03003654 if (IS_CHERRYVIEW(dev_priv))
Ville Syrjäläde38b952015-06-24 22:00:09 +03003655 num_levels = 3;
David Weinehall36cdd012016-08-22 13:59:31 +03003656 else if (IS_VALLEYVIEW(dev_priv))
Ville Syrjäläde38b952015-06-24 22:00:09 +03003657 num_levels = 1;
Ville Syrjälä04548cb2017-04-21 21:14:29 +03003658 else if (IS_G4X(dev_priv))
3659 num_levels = 3;
Ville Syrjäläde38b952015-06-24 22:00:09 +03003660 else
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01003661 num_levels = ilk_wm_max_level(dev_priv) + 1;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003662
3663 drm_modeset_lock_all(dev);
3664
3665 for (level = 0; level < num_levels; level++) {
3666 unsigned int latency = wm[level];
3667
Damien Lespiau97e94b22014-11-04 17:06:50 +00003668 /*
3669 * - WM1+ latency values in 0.5us units
Ville Syrjäläde38b952015-06-24 22:00:09 +03003670		 * - latencies are in us on gen9+/vlv/chv/g4x
Damien Lespiau97e94b22014-11-04 17:06:50 +00003671 */
Ville Syrjälä04548cb2017-04-21 21:14:29 +03003672 if (INTEL_GEN(dev_priv) >= 9 ||
3673 IS_VALLEYVIEW(dev_priv) ||
3674 IS_CHERRYVIEW(dev_priv) ||
3675 IS_G4X(dev_priv))
Damien Lespiau97e94b22014-11-04 17:06:50 +00003676 latency *= 10;
3677 else if (level > 0)
Ville Syrjälä369a1342014-01-22 14:36:08 +02003678 latency *= 5;
3679
3680 seq_printf(m, "WM%d %u (%u.%u usec)\n",
Damien Lespiau97e94b22014-11-04 17:06:50 +00003681 level, wm[level], latency / 10, latency % 10);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003682 }
3683
3684 drm_modeset_unlock_all(dev);
3685}
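
/*
 * Standalone demo of the unit scaling above: the raw fields are in
 * 1 us units on gen9+/vlv/chv/g4x and in 0.5 us units for WM1+ on
 * ilk-style platforms; both are printed in tenths of a microsecond.
 */
#include <stdio.h>

int main(void)
{
        unsigned int wm = 4;            /* raw WM1 latency field */
        unsigned int skl = wm * 10;     /* gen9+: 1 us units */
        unsigned int ilk = wm * 5;      /* ilk WM1+: 0.5 us units */

        printf("gen9: WM1 %u (%u.%u usec)\n", wm, skl / 10, skl % 10);
        printf("ilk:  WM1 %u (%u.%u usec)\n", wm, ilk / 10, ilk % 10);
        return 0;
}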
3686
3687static int pri_wm_latency_show(struct seq_file *m, void *data)
3688{
David Weinehall36cdd012016-08-22 13:59:31 +03003689 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003690 const uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003691
David Weinehall36cdd012016-08-22 13:59:31 +03003692 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003693 latencies = dev_priv->wm.skl_latency;
3694 else
David Weinehall36cdd012016-08-22 13:59:31 +03003695 latencies = dev_priv->wm.pri_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003696
3697 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003698
3699 return 0;
3700}
3701
3702static int spr_wm_latency_show(struct seq_file *m, void *data)
3703{
David Weinehall36cdd012016-08-22 13:59:31 +03003704 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003705 const uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003706
David Weinehall36cdd012016-08-22 13:59:31 +03003707 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003708 latencies = dev_priv->wm.skl_latency;
3709 else
David Weinehall36cdd012016-08-22 13:59:31 +03003710 latencies = dev_priv->wm.spr_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003711
3712 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003713
3714 return 0;
3715}
3716
3717static int cur_wm_latency_show(struct seq_file *m, void *data)
3718{
David Weinehall36cdd012016-08-22 13:59:31 +03003719 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003720 const uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003721
David Weinehall36cdd012016-08-22 13:59:31 +03003722 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003723 latencies = dev_priv->wm.skl_latency;
3724 else
David Weinehall36cdd012016-08-22 13:59:31 +03003725 latencies = dev_priv->wm.cur_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003726
3727 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003728
3729 return 0;
3730}
3731
3732static int pri_wm_latency_open(struct inode *inode, struct file *file)
3733{
David Weinehall36cdd012016-08-22 13:59:31 +03003734 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003735
Ville Syrjälä04548cb2017-04-21 21:14:29 +03003736 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003737 return -ENODEV;
3738
David Weinehall36cdd012016-08-22 13:59:31 +03003739 return single_open(file, pri_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003740}
3741
3742static int spr_wm_latency_open(struct inode *inode, struct file *file)
3743{
David Weinehall36cdd012016-08-22 13:59:31 +03003744 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003745
David Weinehall36cdd012016-08-22 13:59:31 +03003746 if (HAS_GMCH_DISPLAY(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003747 return -ENODEV;
3748
David Weinehall36cdd012016-08-22 13:59:31 +03003749 return single_open(file, spr_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003750}
3751
3752static int cur_wm_latency_open(struct inode *inode, struct file *file)
3753{
David Weinehall36cdd012016-08-22 13:59:31 +03003754 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003755
David Weinehall36cdd012016-08-22 13:59:31 +03003756 if (HAS_GMCH_DISPLAY(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003757 return -ENODEV;
3758
David Weinehall36cdd012016-08-22 13:59:31 +03003759 return single_open(file, cur_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003760}
3761
3762static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
Damien Lespiau97e94b22014-11-04 17:06:50 +00003763 size_t len, loff_t *offp, uint16_t wm[8])
Ville Syrjälä369a1342014-01-22 14:36:08 +02003764{
3765 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003766 struct drm_i915_private *dev_priv = m->private;
3767 struct drm_device *dev = &dev_priv->drm;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003768 uint16_t new[8] = { 0 };
Ville Syrjäläde38b952015-06-24 22:00:09 +03003769 int num_levels;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003770 int level;
3771 int ret;
3772 char tmp[32];
3773
David Weinehall36cdd012016-08-22 13:59:31 +03003774 if (IS_CHERRYVIEW(dev_priv))
Ville Syrjäläde38b952015-06-24 22:00:09 +03003775 num_levels = 3;
David Weinehall36cdd012016-08-22 13:59:31 +03003776 else if (IS_VALLEYVIEW(dev_priv))
Ville Syrjäläde38b952015-06-24 22:00:09 +03003777 num_levels = 1;
Ville Syrjälä04548cb2017-04-21 21:14:29 +03003778 else if (IS_G4X(dev_priv))
3779 num_levels = 3;
Ville Syrjäläde38b952015-06-24 22:00:09 +03003780 else
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01003781 num_levels = ilk_wm_max_level(dev_priv) + 1;
Ville Syrjäläde38b952015-06-24 22:00:09 +03003782
Ville Syrjälä369a1342014-01-22 14:36:08 +02003783 if (len >= sizeof(tmp))
3784 return -EINVAL;
3785
3786 if (copy_from_user(tmp, ubuf, len))
3787 return -EFAULT;
3788
3789 tmp[len] = '\0';
3790
Damien Lespiau97e94b22014-11-04 17:06:50 +00003791 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3792 &new[0], &new[1], &new[2], &new[3],
3793 &new[4], &new[5], &new[6], &new[7]);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003794 if (ret != num_levels)
3795 return -EINVAL;
3796
3797 drm_modeset_lock_all(dev);
3798
3799 for (level = 0; level < num_levels; level++)
3800 wm[level] = new[level];
3801
3802 drm_modeset_unlock_all(dev);
3803
3804 return len;
3805}
3806
3807
3808static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3809 size_t len, loff_t *offp)
3810{
3811 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003812 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003813 uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003814
David Weinehall36cdd012016-08-22 13:59:31 +03003815 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003816 latencies = dev_priv->wm.skl_latency;
3817 else
David Weinehall36cdd012016-08-22 13:59:31 +03003818 latencies = dev_priv->wm.pri_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003819
3820 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003821}
3822
3823static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3824 size_t len, loff_t *offp)
3825{
3826 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003827 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003828 uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003829
David Weinehall36cdd012016-08-22 13:59:31 +03003830 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003831 latencies = dev_priv->wm.skl_latency;
3832 else
David Weinehall36cdd012016-08-22 13:59:31 +03003833 latencies = dev_priv->wm.spr_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003834
3835 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003836}
3837
3838static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3839 size_t len, loff_t *offp)
3840{
3841 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003842 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003843 uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003844
David Weinehall36cdd012016-08-22 13:59:31 +03003845 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003846 latencies = dev_priv->wm.skl_latency;
3847 else
David Weinehall36cdd012016-08-22 13:59:31 +03003848 latencies = dev_priv->wm.cur_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003849
3850 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003851}
3852
3853static const struct file_operations i915_pri_wm_latency_fops = {
3854 .owner = THIS_MODULE,
3855 .open = pri_wm_latency_open,
3856 .read = seq_read,
3857 .llseek = seq_lseek,
3858 .release = single_release,
3859 .write = pri_wm_latency_write
3860};
3861
3862static const struct file_operations i915_spr_wm_latency_fops = {
3863 .owner = THIS_MODULE,
3864 .open = spr_wm_latency_open,
3865 .read = seq_read,
3866 .llseek = seq_lseek,
3867 .release = single_release,
3868 .write = spr_wm_latency_write
3869};
3870
3871static const struct file_operations i915_cur_wm_latency_fops = {
3872 .owner = THIS_MODULE,
3873 .open = cur_wm_latency_open,
3874 .read = seq_read,
3875 .llseek = seq_lseek,
3876 .release = single_release,
3877 .write = cur_wm_latency_write
3878};
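
/*
 * Illustrative write to one of the latency files: raw values, space
 * separated, one per watermark level. The five-value line assumes an
 * ilk-style platform where ilk_wm_max_level() returns 4; the write
 * handler rejects anything but exactly num_levels values.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/sys/kernel/debug/dri/0/i915_pri_wm_latency",
                      O_WRONLY);

        if (fd < 0)
                return 1;
        dprintf(fd, "12 4 4 4 4");
        close(fd);
        return 0;
}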
3879
Kees Cook647416f2013-03-10 14:10:06 -07003880static int
3881i915_wedged_get(void *data, u64 *val)
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003882{
David Weinehall36cdd012016-08-22 13:59:31 +03003883 struct drm_i915_private *dev_priv = data;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003884
Chris Wilsond98c52c2016-04-13 17:35:05 +01003885 *val = i915_terminally_wedged(&dev_priv->gpu_error);
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003886
Kees Cook647416f2013-03-10 14:10:06 -07003887 return 0;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003888}
3889
Kees Cook647416f2013-03-10 14:10:06 -07003890static int
3891i915_wedged_set(void *data, u64 val)
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003892{
Chris Wilson598b6b52017-03-25 13:47:35 +00003893 struct drm_i915_private *i915 = data;
3894 struct intel_engine_cs *engine;
3895 unsigned int tmp;
Imre Deakd46c0512014-04-14 20:24:27 +03003896
Mika Kuoppalab8d24a02015-01-28 17:03:14 +02003897 /*
3898 * There is no safeguard against this debugfs entry colliding
3899	 * with the hangcheck calling the same i915_handle_error() in
3900 * parallel, causing an explosion. For now we assume that the
3901 * test harness is responsible enough not to inject gpu hangs
3902	 * while it is writing to 'i915_wedged'.
3903 */
3904
Chris Wilson598b6b52017-03-25 13:47:35 +00003905 if (i915_reset_backoff(&i915->gpu_error))
Mika Kuoppalab8d24a02015-01-28 17:03:14 +02003906 return -EAGAIN;
3907
Chris Wilson598b6b52017-03-25 13:47:35 +00003908 for_each_engine_masked(engine, i915, val, tmp) {
3909 engine->hangcheck.seqno = intel_engine_get_seqno(engine);
3910 engine->hangcheck.stalled = true;
3911 }
Imre Deakd46c0512014-04-14 20:24:27 +03003912
Chris Wilsonce800752018-03-20 10:04:49 +00003913 i915_handle_error(i915, val, I915_ERROR_CAPTURE,
3914 "Manually set wedged engine mask = %llx", val);
Chris Wilson598b6b52017-03-25 13:47:35 +00003915
3916 wait_on_bit(&i915->gpu_error.flags,
Chris Wilsond3df42b2017-03-16 17:13:05 +00003917 I915_RESET_HANDOFF,
3918 TASK_UNINTERRUPTIBLE);
3919
Kees Cook647416f2013-03-10 14:10:06 -07003920 return 0;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003921}
3922
Kees Cook647416f2013-03-10 14:10:06 -07003923DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
3924 i915_wedged_get, i915_wedged_set,
Mika Kuoppala3a3b4f92013-04-12 12:10:05 +03003925 "%llu\n");
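
/*
 * DEFINE_SIMPLE_ATTRIBUTE() generates the open/read/write glue for a
 * pair of u64 accessors: a read calls i915_wedged_get() and formats
 * the result with "%llu\n", while a write is parsed as an integer
 * (decimal or 0x-prefixed hex) and handed to i915_wedged_set(). The
 * drop-caches, cache-sharing and drrs-ctl attributes below reuse the
 * same pattern.
 */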
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003926
Kees Cook647416f2013-03-10 14:10:06 -07003927static int
Chris Wilson64486ae2017-03-07 15:59:08 +00003928fault_irq_set(struct drm_i915_private *i915,
3929 unsigned long *irq,
3930 unsigned long val)
3931{
3932 int err;
3933
3934 err = mutex_lock_interruptible(&i915->drm.struct_mutex);
3935 if (err)
3936 return err;
3937
3938 err = i915_gem_wait_for_idle(i915,
3939 I915_WAIT_LOCKED |
Chris Wilsonec625fb2018-07-09 13:20:42 +01003940 I915_WAIT_INTERRUPTIBLE,
3941 MAX_SCHEDULE_TIMEOUT);
Chris Wilson64486ae2017-03-07 15:59:08 +00003942 if (err)
3943 goto err_unlock;
3944
Chris Wilson64486ae2017-03-07 15:59:08 +00003945 *irq = val;
3946 mutex_unlock(&i915->drm.struct_mutex);
3947
3948 /* Flush idle worker to disarm irq */
Chris Wilson7c262402017-10-06 11:40:38 +01003949 drain_delayed_work(&i915->gt.idle_work);
Chris Wilson64486ae2017-03-07 15:59:08 +00003950
3951 return 0;
3952
3953err_unlock:
3954 mutex_unlock(&i915->drm.struct_mutex);
3955 return err;
3956}
3957
3958static int
Chris Wilson094f9a52013-09-25 17:34:55 +01003959i915_ring_missed_irq_get(void *data, u64 *val)
3960{
David Weinehall36cdd012016-08-22 13:59:31 +03003961 struct drm_i915_private *dev_priv = data;
Chris Wilson094f9a52013-09-25 17:34:55 +01003962
3963 *val = dev_priv->gpu_error.missed_irq_rings;
3964 return 0;
3965}
3966
3967static int
3968i915_ring_missed_irq_set(void *data, u64 val)
3969{
Chris Wilson64486ae2017-03-07 15:59:08 +00003970 struct drm_i915_private *i915 = data;
Chris Wilson094f9a52013-09-25 17:34:55 +01003971
Chris Wilson64486ae2017-03-07 15:59:08 +00003972 return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
Chris Wilson094f9a52013-09-25 17:34:55 +01003973}
3974
3975DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
3976 i915_ring_missed_irq_get, i915_ring_missed_irq_set,
3977 "0x%08llx\n");
3978
3979static int
3980i915_ring_test_irq_get(void *data, u64 *val)
3981{
David Weinehall36cdd012016-08-22 13:59:31 +03003982 struct drm_i915_private *dev_priv = data;
Chris Wilson094f9a52013-09-25 17:34:55 +01003983
3984 *val = dev_priv->gpu_error.test_irq_rings;
3985
3986 return 0;
3987}
3988
3989static int
3990i915_ring_test_irq_set(void *data, u64 val)
3991{
Chris Wilson64486ae2017-03-07 15:59:08 +00003992 struct drm_i915_private *i915 = data;
Chris Wilson094f9a52013-09-25 17:34:55 +01003993
Chris Wilson5f521722018-09-07 12:28:51 +01003994 /* GuC keeps the user interrupt permanently enabled for submission */
3995 if (USES_GUC_SUBMISSION(i915))
3996 return -ENODEV;
3997
3998 /*
3999 * From icl, we can no longer individually mask interrupt generation
4000 * from each engine.
4001 */
4002 if (INTEL_GEN(i915) >= 11)
4003 return -ENODEV;
4004
Chris Wilson64486ae2017-03-07 15:59:08 +00004005 val &= INTEL_INFO(i915)->ring_mask;
Chris Wilson094f9a52013-09-25 17:34:55 +01004006 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
Chris Wilson094f9a52013-09-25 17:34:55 +01004007
Chris Wilson64486ae2017-03-07 15:59:08 +00004008 return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
Chris Wilson094f9a52013-09-25 17:34:55 +01004009}
4010
4011DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
4012 i915_ring_test_irq_get, i915_ring_test_irq_set,
4013 "0x%08llx\n");
4014
Chris Wilsonb4a0b322017-10-18 13:16:21 +01004015#define DROP_UNBOUND BIT(0)
4016#define DROP_BOUND BIT(1)
4017#define DROP_RETIRE BIT(2)
4018#define DROP_ACTIVE BIT(3)
4019#define DROP_FREED BIT(4)
4020#define DROP_SHRINK_ALL BIT(5)
4021#define DROP_IDLE BIT(6)
Chris Wilson6b048702018-09-03 09:33:37 +01004022#define DROP_RESET_ACTIVE BIT(7)
4023#define DROP_RESET_SEQNO BIT(8)
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004024#define DROP_ALL (DROP_UNBOUND | \
4025 DROP_BOUND | \
4026 DROP_RETIRE | \
4027 DROP_ACTIVE | \
Chris Wilson8eadc192017-03-08 14:46:22 +00004028 DROP_FREED | \
Chris Wilsonb4a0b322017-10-18 13:16:21 +01004029 DROP_SHRINK_ALL |\
Chris Wilson6b048702018-09-03 09:33:37 +01004030 DROP_IDLE | \
4031 DROP_RESET_ACTIVE | \
4032 DROP_RESET_SEQNO)
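
/*
 * The nine flags above occupy bits 0-8, so DROP_ALL evaluates to
 * 0x1ff; that is also the value the read side reports, making
 * "write back what you read" request every drop action at once.
 */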
Kees Cook647416f2013-03-10 14:10:06 -07004033static int
4034i915_drop_caches_get(void *data, u64 *val)
Chris Wilsondd624af2013-01-15 12:39:35 +00004035{
Kees Cook647416f2013-03-10 14:10:06 -07004036 *val = DROP_ALL;
Chris Wilsondd624af2013-01-15 12:39:35 +00004037
Kees Cook647416f2013-03-10 14:10:06 -07004038 return 0;
Chris Wilsondd624af2013-01-15 12:39:35 +00004039}
4040
Kees Cook647416f2013-03-10 14:10:06 -07004041static int
4042i915_drop_caches_set(void *data, u64 val)
Chris Wilsondd624af2013-01-15 12:39:35 +00004043{
Chris Wilson6b048702018-09-03 09:33:37 +01004044 struct drm_i915_private *i915 = data;
Chris Wilsona0371212019-01-14 14:21:14 +00004045 intel_wakeref_t wakeref;
Chris Wilson00c26cf2017-05-24 17:26:53 +01004046 int ret = 0;
Chris Wilsondd624af2013-01-15 12:39:35 +00004047
Chris Wilsonb4a0b322017-10-18 13:16:21 +01004048 DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
4049 val, val & DROP_ALL);
Chris Wilsona0371212019-01-14 14:21:14 +00004050 wakeref = intel_runtime_pm_get(i915);
Chris Wilsondd624af2013-01-15 12:39:35 +00004051
Chris Wilson6b048702018-09-03 09:33:37 +01004052 if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915))
4053 i915_gem_set_wedged(i915);
4054
Chris Wilsondd624af2013-01-15 12:39:35 +00004055	/* No need to check and wait for gpu resets; libdrm auto-restarts
4056	 * ioctls on -EAGAIN. */
Chris Wilson6b048702018-09-03 09:33:37 +01004057 if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
4058 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
Chris Wilsondd624af2013-01-15 12:39:35 +00004059 if (ret)
Joonas Lahtinen198a2a22018-10-18 12:20:25 +03004060 goto out;
Chris Wilsondd624af2013-01-15 12:39:35 +00004061
Chris Wilson00c26cf2017-05-24 17:26:53 +01004062 if (val & DROP_ACTIVE)
Chris Wilson6b048702018-09-03 09:33:37 +01004063 ret = i915_gem_wait_for_idle(i915,
Chris Wilson00c26cf2017-05-24 17:26:53 +01004064 I915_WAIT_INTERRUPTIBLE |
Chris Wilsonec625fb2018-07-09 13:20:42 +01004065 I915_WAIT_LOCKED,
4066 MAX_SCHEDULE_TIMEOUT);
Chris Wilson00c26cf2017-05-24 17:26:53 +01004067
Chris Wilson6b048702018-09-03 09:33:37 +01004068 if (val & DROP_RETIRE)
4069 i915_retire_requests(i915);
4070
4071 mutex_unlock(&i915->drm.struct_mutex);
4072 }
4073
4074 if (val & DROP_RESET_ACTIVE &&
4075 i915_terminally_wedged(&i915->gpu_error)) {
4076 i915_handle_error(i915, ALL_ENGINES, 0, NULL);
4077 wait_on_bit(&i915->gpu_error.flags,
4078 I915_RESET_HANDOFF,
4079 TASK_UNINTERRUPTIBLE);
Chris Wilson00c26cf2017-05-24 17:26:53 +01004080 }
Chris Wilsondd624af2013-01-15 12:39:35 +00004081
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01004082 fs_reclaim_acquire(GFP_KERNEL);
Chris Wilson21ab4e72014-09-09 11:16:08 +01004083 if (val & DROP_BOUND)
Chris Wilson6b048702018-09-03 09:33:37 +01004084 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
Chris Wilson4ad72b72014-09-03 19:23:37 +01004085
Chris Wilson21ab4e72014-09-09 11:16:08 +01004086 if (val & DROP_UNBOUND)
Chris Wilson6b048702018-09-03 09:33:37 +01004087 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
Chris Wilsondd624af2013-01-15 12:39:35 +00004088
Chris Wilson8eadc192017-03-08 14:46:22 +00004089 if (val & DROP_SHRINK_ALL)
Chris Wilson6b048702018-09-03 09:33:37 +01004090 i915_gem_shrink_all(i915);
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01004091 fs_reclaim_release(GFP_KERNEL);
Chris Wilson8eadc192017-03-08 14:46:22 +00004092
Chris Wilson4dfacb02018-05-31 09:22:43 +01004093 if (val & DROP_IDLE) {
4094 do {
Chris Wilson6b048702018-09-03 09:33:37 +01004095 if (READ_ONCE(i915->gt.active_requests))
4096 flush_delayed_work(&i915->gt.retire_work);
4097 drain_delayed_work(&i915->gt.idle_work);
4098 } while (READ_ONCE(i915->gt.awake));
Chris Wilson4dfacb02018-05-31 09:22:43 +01004099 }
Chris Wilsonb4a0b322017-10-18 13:16:21 +01004100
Chris Wilsonc9c704712018-02-19 22:06:31 +00004101 if (val & DROP_FREED)
Chris Wilson6b048702018-09-03 09:33:37 +01004102 i915_gem_drain_freed_objects(i915);
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004103
Joonas Lahtinen198a2a22018-10-18 12:20:25 +03004104out:
Chris Wilsona0371212019-01-14 14:21:14 +00004105 intel_runtime_pm_put(i915, wakeref);
Chris Wilson9d3eb2c2018-10-15 12:58:56 +01004106
Kees Cook647416f2013-03-10 14:10:06 -07004107 return ret;
Chris Wilsondd624af2013-01-15 12:39:35 +00004108}
4109
Kees Cook647416f2013-03-10 14:10:06 -07004110DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
4111 i915_drop_caches_get, i915_drop_caches_set,
4112 "0x%08llx\n");
Chris Wilsondd624af2013-01-15 12:39:35 +00004113
Kees Cook647416f2013-03-10 14:10:06 -07004114static int
Kees Cook647416f2013-03-10 14:10:06 -07004115i915_cache_sharing_get(void *data, u64 *val)
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004116{
David Weinehall36cdd012016-08-22 13:59:31 +03004117 struct drm_i915_private *dev_priv = data;
Chris Wilsona0371212019-01-14 14:21:14 +00004118 intel_wakeref_t wakeref;
Chris Wilsond4225a52019-01-14 14:21:23 +00004119 u32 snpcr = 0;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004120
Lucas De Marchif3ce44a2018-12-12 10:10:44 -08004121 if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
Daniel Vetter004777c2012-08-09 15:07:01 +02004122 return -ENODEV;
4123
Chris Wilsond4225a52019-01-14 14:21:23 +00004124 with_intel_runtime_pm(dev_priv, wakeref)
4125 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004126
Kees Cook647416f2013-03-10 14:10:06 -07004127 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004128
Kees Cook647416f2013-03-10 14:10:06 -07004129 return 0;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004130}
4131
Kees Cook647416f2013-03-10 14:10:06 -07004132static int
4133i915_cache_sharing_set(void *data, u64 val)
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004134{
David Weinehall36cdd012016-08-22 13:59:31 +03004135 struct drm_i915_private *dev_priv = data;
Chris Wilsona0371212019-01-14 14:21:14 +00004136 intel_wakeref_t wakeref;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004137
Lucas De Marchif3ce44a2018-12-12 10:10:44 -08004138 if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
Daniel Vetter004777c2012-08-09 15:07:01 +02004139 return -ENODEV;
4140
Kees Cook647416f2013-03-10 14:10:06 -07004141 if (val > 3)
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004142 return -EINVAL;
4143
Kees Cook647416f2013-03-10 14:10:06 -07004144 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
Chris Wilsond4225a52019-01-14 14:21:23 +00004145 with_intel_runtime_pm(dev_priv, wakeref) {
4146 u32 snpcr;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004147
Chris Wilsond4225a52019-01-14 14:21:23 +00004148 /* Update the cache sharing policy here as well */
4149 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4150 snpcr &= ~GEN6_MBC_SNPCR_MASK;
4151 snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
4152 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
4153 }
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004154
Kees Cook647416f2013-03-10 14:10:06 -07004155 return 0;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004156}
4157
Kees Cook647416f2013-03-10 14:10:06 -07004158DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
4159 i915_cache_sharing_get, i915_cache_sharing_set,
4160 "%llu\n");
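
/*
 * Standalone demo of the read-modify-write above. The mask and shift
 * are stand-ins for GEN6_MBC_SNPCR_MASK/GEN6_MBC_SNPCR_SHIFT from the
 * register headers, not their real values.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define SNPCR_MASK      (3u << 21)      /* stand-in field mask */
#define SNPCR_SHIFT     21              /* stand-in field shift */

int main(void)
{
        uint32_t snpcr = 0xdeadbeef;    /* pretend register readback */
        uint32_t val = 2;               /* policy value, 0..3 */

        snpcr &= ~SNPCR_MASK;
        snpcr |= val << SNPCR_SHIFT;
        printf("snpcr = 0x%08" PRIx32 "\n", snpcr);
        return 0;
}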
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004161
David Weinehall36cdd012016-08-22 13:59:31 +03004162static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
Imre Deak915490d2016-08-31 19:13:01 +03004163 struct sseu_dev_info *sseu)
Jeff McGee5d395252015-04-03 18:13:17 -07004164{
Chris Wilson7aa0b142018-03-13 00:40:54 +00004165#define SS_MAX 2
4166 const int ss_max = SS_MAX;
4167 u32 sig1[SS_MAX], sig2[SS_MAX];
Jeff McGee5d395252015-04-03 18:13:17 -07004168 int ss;
Jeff McGee5d395252015-04-03 18:13:17 -07004169
4170 sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
4171 sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
4172 sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
4173 sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
4174
4175 for (ss = 0; ss < ss_max; ss++) {
4176 unsigned int eu_cnt;
4177
4178 if (sig1[ss] & CHV_SS_PG_ENABLE)
4179 /* skip disabled subslice */
4180 continue;
4181
Imre Deakf08a0c92016-08-31 19:13:04 +03004182 sseu->slice_mask = BIT(0);
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004183 sseu->subslice_mask[0] |= BIT(ss);
Jeff McGee5d395252015-04-03 18:13:17 -07004184 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
4185 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
4186 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
4187 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
Imre Deak915490d2016-08-31 19:13:01 +03004188 sseu->eu_total += eu_cnt;
4189 sseu->eu_per_subslice = max_t(unsigned int,
4190 sseu->eu_per_subslice, eu_cnt);
Jeff McGee5d395252015-04-03 18:13:17 -07004191 }
Chris Wilson7aa0b142018-03-13 00:40:54 +00004192#undef SS_MAX
Jeff McGee5d395252015-04-03 18:13:17 -07004193}
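
/*
 * Worked example for the loop above: each CHV_EU*_PG_ENABLE bit
 * power-gates a pair of EUs, so a subslice with none of the four
 * bits set contributes 2 + 2 + 2 + 2 = 8 EUs to sseu->eu_total.
 */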
4194
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004195static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
4196 struct sseu_dev_info *sseu)
4197{
Chris Wilsonc7fb3c62018-03-13 11:31:49 +00004198#define SS_MAX 6
Jani Nikula02584042018-12-31 16:56:41 +02004199 const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
Chris Wilsonc7fb3c62018-03-13 11:31:49 +00004200 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004201 int s, ss;
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004202
Lionel Landwerlinb3e7f862018-03-06 12:28:53 +00004203 for (s = 0; s < info->sseu.max_slices; s++) {
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004204 /*
4205		 * FIXME: Valid SS Mask respects the spec and reads
Alexandre Belloni3c64ea82018-11-20 16:14:15 +01004206		 * only valid bits for those registers, excluding reserved,
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004207		 * although this seems wrong because it would leave many
4208		 * subslices without ACK.
4209 */
4210 s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
4211 GEN10_PGCTL_VALID_SS_MASK(s);
4212 eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
4213 eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
4214 }
4215
4216 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4217 GEN9_PGCTL_SSA_EU19_ACK |
4218 GEN9_PGCTL_SSA_EU210_ACK |
4219 GEN9_PGCTL_SSA_EU311_ACK;
4220 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4221 GEN9_PGCTL_SSB_EU19_ACK |
4222 GEN9_PGCTL_SSB_EU210_ACK |
4223 GEN9_PGCTL_SSB_EU311_ACK;
4224
Lionel Landwerlinb3e7f862018-03-06 12:28:53 +00004225 for (s = 0; s < info->sseu.max_slices; s++) {
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004226 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4227 /* skip disabled slice */
4228 continue;
4229
4230 sseu->slice_mask |= BIT(s);
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004231 sseu->subslice_mask[s] = info->sseu.subslice_mask[s];
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004232
Lionel Landwerlinb3e7f862018-03-06 12:28:53 +00004233 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004234 unsigned int eu_cnt;
4235
4236 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4237 /* skip disabled subslice */
4238 continue;
4239
4240 eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
4241 eu_mask[ss % 2]);
4242 sseu->eu_total += eu_cnt;
4243 sseu->eu_per_subslice = max_t(unsigned int,
4244 sseu->eu_per_subslice,
4245 eu_cnt);
4246 }
4247 }
Chris Wilsonc7fb3c62018-03-13 11:31:49 +00004248#undef SS_MAX
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004249}
4250
David Weinehall36cdd012016-08-22 13:59:31 +03004251static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
Imre Deak915490d2016-08-31 19:13:01 +03004252 struct sseu_dev_info *sseu)
Jeff McGee5d395252015-04-03 18:13:17 -07004253{
Chris Wilsonc7fb3c62018-03-13 11:31:49 +00004254#define SS_MAX 3
Jani Nikula02584042018-12-31 16:56:41 +02004255 const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
Chris Wilsonc7fb3c62018-03-13 11:31:49 +00004256 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
Jeff McGee5d395252015-04-03 18:13:17 -07004257 int s, ss;
Jeff McGee5d395252015-04-03 18:13:17 -07004258
Lionel Landwerlinb3e7f862018-03-06 12:28:53 +00004259 for (s = 0; s < info->sseu.max_slices; s++) {
Jeff McGee1c046bc2015-04-03 18:13:18 -07004260 s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
4261 eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
4262 eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
4263 }
4264
Jeff McGee5d395252015-04-03 18:13:17 -07004265 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4266 GEN9_PGCTL_SSA_EU19_ACK |
4267 GEN9_PGCTL_SSA_EU210_ACK |
4268 GEN9_PGCTL_SSA_EU311_ACK;
4269 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4270 GEN9_PGCTL_SSB_EU19_ACK |
4271 GEN9_PGCTL_SSB_EU210_ACK |
4272 GEN9_PGCTL_SSB_EU311_ACK;
4273
Lionel Landwerlinb3e7f862018-03-06 12:28:53 +00004274 for (s = 0; s < info->sseu.max_slices; s++) {
Jeff McGee5d395252015-04-03 18:13:17 -07004275 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4276 /* skip disabled slice */
4277 continue;
4278
Imre Deakf08a0c92016-08-31 19:13:04 +03004279 sseu->slice_mask |= BIT(s);
Jeff McGee1c046bc2015-04-03 18:13:18 -07004280
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004281 if (IS_GEN9_BC(dev_priv))
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004282 sseu->subslice_mask[s] =
Jani Nikula02584042018-12-31 16:56:41 +02004283 RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
Jeff McGee1c046bc2015-04-03 18:13:18 -07004284
Lionel Landwerlinb3e7f862018-03-06 12:28:53 +00004285 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
Jeff McGee5d395252015-04-03 18:13:17 -07004286 unsigned int eu_cnt;
4287
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02004288 if (IS_GEN9_LP(dev_priv)) {
Imre Deak57ec1712016-08-31 19:13:05 +03004289 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4290 /* skip disabled subslice */
4291 continue;
Jeff McGee1c046bc2015-04-03 18:13:18 -07004292
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004293 sseu->subslice_mask[s] |= BIT(ss);
Imre Deak57ec1712016-08-31 19:13:05 +03004294 }
Jeff McGee1c046bc2015-04-03 18:13:18 -07004295
Jeff McGee5d395252015-04-03 18:13:17 -07004296 eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
4297 eu_mask[ss%2]);
Imre Deak915490d2016-08-31 19:13:01 +03004298 sseu->eu_total += eu_cnt;
4299 sseu->eu_per_subslice = max_t(unsigned int,
4300 sseu->eu_per_subslice,
4301 eu_cnt);
Jeff McGee5d395252015-04-03 18:13:17 -07004302 }
4303 }
Chris Wilsonc7fb3c62018-03-13 11:31:49 +00004304#undef SS_MAX
Jeff McGee5d395252015-04-03 18:13:17 -07004305}
4306
David Weinehall36cdd012016-08-22 13:59:31 +03004307static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
Imre Deak915490d2016-08-31 19:13:01 +03004308 struct sseu_dev_info *sseu)
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004309{
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004310 u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
David Weinehall36cdd012016-08-22 13:59:31 +03004311 int s;
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004312
Imre Deakf08a0c92016-08-31 19:13:04 +03004313 sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004314
Imre Deakf08a0c92016-08-31 19:13:04 +03004315 if (sseu->slice_mask) {
Imre Deak43b67992016-08-31 19:13:02 +03004316 sseu->eu_per_subslice =
Jani Nikula02584042018-12-31 16:56:41 +02004317 RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004318 for (s = 0; s < fls(sseu->slice_mask); s++) {
4319 sseu->subslice_mask[s] =
Jani Nikula02584042018-12-31 16:56:41 +02004320 RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004321 }
Imre Deak57ec1712016-08-31 19:13:05 +03004322 sseu->eu_total = sseu->eu_per_subslice *
4323 sseu_subslice_total(sseu);
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004324
4325 /* subtract fused off EU(s) from enabled slice(s) */
Imre Deak795b38b2016-08-31 19:13:07 +03004326 for (s = 0; s < fls(sseu->slice_mask); s++) {
Imre Deak43b67992016-08-31 19:13:02 +03004327 u8 subslice_7eu =
Jani Nikula02584042018-12-31 16:56:41 +02004328 RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004329
Imre Deak915490d2016-08-31 19:13:01 +03004330 sseu->eu_total -= hweight8(subslice_7eu);
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004331 }
4332 }
4333}
4334
Imre Deak615d8902016-08-31 19:13:03 +03004335static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
4336 const struct sseu_dev_info *sseu)
4337{
4338 struct drm_i915_private *dev_priv = node_to_i915(m->private);
4339 const char *type = is_available_info ? "Available" : "Enabled";
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004340 int s;
Imre Deak615d8902016-08-31 19:13:03 +03004341
Imre Deakc67ba532016-08-31 19:13:06 +03004342 seq_printf(m, " %s Slice Mask: %04x\n", type,
4343 sseu->slice_mask);
Imre Deak615d8902016-08-31 19:13:03 +03004344 seq_printf(m, " %s Slice Total: %u\n", type,
Imre Deakf08a0c92016-08-31 19:13:04 +03004345 hweight8(sseu->slice_mask));
Imre Deak615d8902016-08-31 19:13:03 +03004346 seq_printf(m, " %s Subslice Total: %u\n", type,
Imre Deak57ec1712016-08-31 19:13:05 +03004347 sseu_subslice_total(sseu));
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004348 for (s = 0; s < fls(sseu->slice_mask); s++) {
4349 seq_printf(m, " %s Slice%i subslices: %u\n", type,
4350 s, hweight8(sseu->subslice_mask[s]));
4351 }
Imre Deak615d8902016-08-31 19:13:03 +03004352 seq_printf(m, " %s EU Total: %u\n", type,
4353 sseu->eu_total);
4354 seq_printf(m, " %s EU Per Subslice: %u\n", type,
4355 sseu->eu_per_subslice);
4356
4357 if (!is_available_info)
4358 return;
4359
4360 seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
4361 if (HAS_POOLED_EU(dev_priv))
4362 seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);
4363
4364 seq_printf(m, " Has Slice Power Gating: %s\n",
4365 yesno(sseu->has_slice_pg));
4366 seq_printf(m, " Has Subslice Power Gating: %s\n",
4367 yesno(sseu->has_subslice_pg));
4368 seq_printf(m, " Has EU Power Gating: %s\n",
4369 yesno(sseu->has_eu_pg));
4370}
4371
Jeff McGee38732182015-02-13 10:27:54 -06004372static int i915_sseu_status(struct seq_file *m, void *unused)
4373{
David Weinehall36cdd012016-08-22 13:59:31 +03004374 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Imre Deak915490d2016-08-31 19:13:01 +03004375 struct sseu_dev_info sseu;
Chris Wilsona0371212019-01-14 14:21:14 +00004376 intel_wakeref_t wakeref;
Jeff McGee38732182015-02-13 10:27:54 -06004377
David Weinehall36cdd012016-08-22 13:59:31 +03004378 if (INTEL_GEN(dev_priv) < 8)
Jeff McGee38732182015-02-13 10:27:54 -06004379 return -ENODEV;
4380
4381 seq_puts(m, "SSEU Device Info\n");
Jani Nikula02584042018-12-31 16:56:41 +02004382 i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);
Jeff McGee38732182015-02-13 10:27:54 -06004383
Jeff McGee7f992ab2015-02-13 10:27:55 -06004384 seq_puts(m, "SSEU Device Status\n");
Imre Deak915490d2016-08-31 19:13:01 +03004385 memset(&sseu, 0, sizeof(sseu));
Jani Nikula02584042018-12-31 16:56:41 +02004386 sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
4387 sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004388 sseu.max_eus_per_subslice =
Jani Nikula02584042018-12-31 16:56:41 +02004389 RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;
David Weinehall238010e2016-08-01 17:33:27 +03004390
Chris Wilsond4225a52019-01-14 14:21:23 +00004391 with_intel_runtime_pm(dev_priv, wakeref) {
4392 if (IS_CHERRYVIEW(dev_priv))
4393 cherryview_sseu_device_status(dev_priv, &sseu);
4394 else if (IS_BROADWELL(dev_priv))
4395 broadwell_sseu_device_status(dev_priv, &sseu);
4396 else if (IS_GEN(dev_priv, 9))
4397 gen9_sseu_device_status(dev_priv, &sseu);
4398 else if (INTEL_GEN(dev_priv) >= 10)
4399 gen10_sseu_device_status(dev_priv, &sseu);
Jeff McGee7f992ab2015-02-13 10:27:55 -06004400 }
David Weinehall238010e2016-08-01 17:33:27 +03004401
Imre Deak615d8902016-08-31 19:13:03 +03004402 i915_print_sseu_info(m, false, &sseu);
Jeff McGee7f992ab2015-02-13 10:27:55 -06004403
Jeff McGee38732182015-02-13 10:27:54 -06004404 return 0;
4405}
4406
Ben Widawsky6d794d42011-04-25 11:25:56 -07004407static int i915_forcewake_open(struct inode *inode, struct file *file)
4408{
Chris Wilsond7a133d2017-09-07 14:44:41 +01004409 struct drm_i915_private *i915 = inode->i_private;
Ben Widawsky6d794d42011-04-25 11:25:56 -07004410
Chris Wilsond7a133d2017-09-07 14:44:41 +01004411 if (INTEL_GEN(i915) < 6)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004412 return 0;
4413
Chris Wilsond7a133d2017-09-07 14:44:41 +01004414 intel_runtime_pm_get(i915);
4415 intel_uncore_forcewake_user_get(i915);
Ben Widawsky6d794d42011-04-25 11:25:56 -07004416
4417 return 0;
4418}
4419
Ben Widawskyc43b5632012-04-16 14:07:40 -07004420static int i915_forcewake_release(struct inode *inode, struct file *file)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004421{
Chris Wilsond7a133d2017-09-07 14:44:41 +01004422 struct drm_i915_private *i915 = inode->i_private;
Ben Widawsky6d794d42011-04-25 11:25:56 -07004423
Chris Wilsond7a133d2017-09-07 14:44:41 +01004424 if (INTEL_GEN(i915) < 6)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004425 return 0;
4426
Chris Wilsond7a133d2017-09-07 14:44:41 +01004427 intel_uncore_forcewake_user_put(i915);
Chris Wilson16e4dd032019-01-14 14:21:10 +00004428 intel_runtime_pm_put_unchecked(i915);
Ben Widawsky6d794d42011-04-25 11:25:56 -07004429
4430 return 0;
4431}
4432
4433static const struct file_operations i915_forcewake_fops = {
4434 .owner = THIS_MODULE,
4435 .open = i915_forcewake_open,
4436 .release = i915_forcewake_release,
4437};
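
/*
 * Illustrative userspace use: forcewake (plus a runtime-pm reference)
 * is taken at open() and dropped at release(), so the GT stays awake
 * for as long as the fd is held open. Path assumes debugfs mounted at
 * /sys/kernel/debug.
 */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/sys/kernel/debug/dri/0/i915_forcewake_user",
                      O_RDONLY);

        if (fd < 0)
                return 1;
        sleep(10);      /* registers stay force-woken while fd is open */
        close(fd);
        return 0;
}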
4438
Lyude317eaa92017-02-03 21:18:25 -05004439static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4440{
4441 struct drm_i915_private *dev_priv = m->private;
4442 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4443
Lyude Paul6fc5d782018-11-20 19:37:17 -05004444	/* Synchronize with everything first in case there's been an HPD
4445	 * storm that we haven't finished handling in the kernel yet
4446	 */
4447 synchronize_irq(dev_priv->drm.irq);
4448 flush_work(&dev_priv->hotplug.dig_port_work);
4449 flush_work(&dev_priv->hotplug.hotplug_work);
4450
Lyude317eaa92017-02-03 21:18:25 -05004451 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4452 seq_printf(m, "Detected: %s\n",
4453 yesno(delayed_work_pending(&hotplug->reenable_work)));
4454
4455 return 0;
4456}
4457
4458static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4459 const char __user *ubuf, size_t len,
4460 loff_t *offp)
4461{
4462 struct seq_file *m = file->private_data;
4463 struct drm_i915_private *dev_priv = m->private;
4464 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4465 unsigned int new_threshold;
4466 int i;
4467 char *newline;
4468 char tmp[16];
4469
4470 if (len >= sizeof(tmp))
4471 return -EINVAL;
4472
4473 if (copy_from_user(tmp, ubuf, len))
4474 return -EFAULT;
4475
4476 tmp[len] = '\0';
4477
4478 /* Strip newline, if any */
4479 newline = strchr(tmp, '\n');
4480 if (newline)
4481 *newline = '\0';
4482
4483 if (strcmp(tmp, "reset") == 0)
4484 new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4485 else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4486 return -EINVAL;
4487
4488 if (new_threshold > 0)
4489 DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4490 new_threshold);
4491 else
4492 DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4493
4494 spin_lock_irq(&dev_priv->irq_lock);
4495 hotplug->hpd_storm_threshold = new_threshold;
4496 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4497 for_each_hpd_pin(i)
4498 hotplug->stats[i].count = 0;
4499 spin_unlock_irq(&dev_priv->irq_lock);
4500
4501 /* Re-enable hpd immediately if we were in an irq storm */
4502 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4503
4504 return len;
4505}
4506
4507static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
4508{
4509 return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
4510}
4511
4512static const struct file_operations i915_hpd_storm_ctl_fops = {
4513 .owner = THIS_MODULE,
4514 .open = i915_hpd_storm_ctl_open,
4515 .read = seq_read,
4516 .llseek = seq_lseek,
4517 .release = single_release,
4518 .write = i915_hpd_storm_ctl_write
4519};
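
/*
 * Accepted writes, per i915_hpd_storm_ctl_write() above: a decimal
 * threshold (0 disables storm detection), or the literal string
 * "reset" to restore HPD_STORM_DEFAULT_THRESHOLD. Reads report the
 * current threshold and whether a storm has been detected.
 */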
4520
Lyude Paul9a64c652018-11-06 16:30:16 -05004521static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
4522{
4523 struct drm_i915_private *dev_priv = m->private;
4524
4525 seq_printf(m, "Enabled: %s\n",
4526 yesno(dev_priv->hotplug.hpd_short_storm_enabled));
4527
4528 return 0;
4529}
4530
4531static int
4532i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
4533{
4534 return single_open(file, i915_hpd_short_storm_ctl_show,
4535 inode->i_private);
4536}
4537
4538static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
4539 const char __user *ubuf,
4540 size_t len, loff_t *offp)
4541{
4542 struct seq_file *m = file->private_data;
4543 struct drm_i915_private *dev_priv = m->private;
4544 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4545 char *newline;
4546 char tmp[16];
4547 int i;
4548 bool new_state;
4549
4550 if (len >= sizeof(tmp))
4551 return -EINVAL;
4552
4553 if (copy_from_user(tmp, ubuf, len))
4554 return -EFAULT;
4555
4556 tmp[len] = '\0';
4557
4558 /* Strip newline, if any */
4559 newline = strchr(tmp, '\n');
4560 if (newline)
4561 *newline = '\0';
4562
4563 /* Reset to the "default" state for this system */
4564 if (strcmp(tmp, "reset") == 0)
4565 new_state = !HAS_DP_MST(dev_priv);
4566 else if (kstrtobool(tmp, &new_state) != 0)
4567 return -EINVAL;
4568
4569 DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
4570 new_state ? "En" : "Dis");
4571
4572 spin_lock_irq(&dev_priv->irq_lock);
4573 hotplug->hpd_short_storm_enabled = new_state;
4574 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4575 for_each_hpd_pin(i)
4576 hotplug->stats[i].count = 0;
4577 spin_unlock_irq(&dev_priv->irq_lock);
4578
4579 /* Re-enable hpd immediately if we were in an irq storm */
4580 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4581
4582 return len;
4583}
4584
4585static const struct file_operations i915_hpd_short_storm_ctl_fops = {
4586 .owner = THIS_MODULE,
4587 .open = i915_hpd_short_storm_ctl_open,
4588 .read = seq_read,
4589 .llseek = seq_lseek,
4590 .release = single_release,
4591 .write = i915_hpd_short_storm_ctl_write,
4592};
4593
static int i915_drrs_ctl_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 7)
		return -ENODEV;

	for_each_intel_crtc(dev, crtc) {
		struct drm_connector_list_iter conn_iter;
		struct intel_crtc_state *crtc_state;
		struct drm_connector *connector;
		struct drm_crtc_commit *commit;
		int ret;

		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		if (!crtc_state->base.active ||
		    !crtc_state->has_drrs)
			goto out;

		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				goto out;
		}

		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct intel_encoder *encoder;
			struct intel_dp *intel_dp;

			if (!(crtc_state->base.connector_mask &
			      drm_connector_mask(connector)))
				continue;

			encoder = intel_attached_encoder(connector);
			if (encoder->type != INTEL_OUTPUT_EDP)
				continue;

			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
					 val ? "en" : "dis", val);

			intel_dp = enc_to_intel_dp(&encoder->base);
			if (val)
				intel_edp_drrs_enable(intel_dp,
						      crtc_state);
			else
				intel_edp_drrs_disable(intel_dp,
						       crtc_state);
		}
		drm_connector_list_iter_end(&conn_iter);

out:
		drm_modeset_unlock(&crtc->base.mutex);
		if (ret)
			return ret;
	}

	return 0;
}

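/*
 * DEFINE_SIMPLE_ATTRIBUTE() generates the fops: there is no getter (NULL),
 * so the file is effectively write-only, and writes are parsed with the
 * "%llu" format before being handed to i915_drrs_ctl_set().
 */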
DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");

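/*
 * Write handler for i915_fifo_underrun_reset: on any truthy write, wait for
 * pending commits on each CRTC and re-arm the FIFO underrun reporting that
 * gets disabled after the first underrun, then reset FBC underrun tracking.
 */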
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->base.active) {
			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
				      pipe_name(intel_crtc->pipe));

			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}

		drm_modeset_unlock(&intel_crtc->base.mutex);

		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}

static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};

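/*
 * Read-only debugfs entries, registered in bulk through
 * drm_debugfs_create_files() in i915_debugfs_register() below.
 */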
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_reset_info", i915_reset_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

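/*
 * Writable debugfs entries. Unlike i915_debugfs_list above, each of these
 * brings its own file_operations and is created individually with
 * debugfs_create_file().
 */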
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};

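/*
 * Called for the primary minor at driver registration: creates the
 * forcewake user handle, every entry of i915_debugfs_files[], and finally
 * the read-only i915_debugfs_list[] entries.
 */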
int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	struct dentry *ent;
	int i;

	ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
				  minor->debugfs_root, to_i915(minor->dev),
				  &i915_forcewake_fops);
	if (!ent)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		ent = debugfs_create_file(i915_debugfs_files[i].name,
					  S_IRUGO | S_IWUSR,
					  minor->debugfs_root,
					  to_i915(minor->dev),
					  i915_debugfs_files[i].fops);
		if (!ent)
			return -ENOMEM;
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}

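/*
 * One contiguous range of DPCD registers to dump in i915_dpcd_show(). The
 * range is bounded either by .end (inclusive) or by .size; with neither
 * set, a single byte at .offset is read.
 */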
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};

static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};

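/*
 * Read each DPCD block over the AUX channel and print it as one
 * "offset: hex bytes" line, skipping eDP-only blocks on non-eDP connectors.
 */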
static int i915_dpcd_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	uint8_t buf[16];
	ssize_t err;
	int i;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
		const struct dpcd_block *b = &i915_dpcd_debug[i];
		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);

		if (b->edp &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		/* low tech for now */
		if (WARN_ON(size > sizeof(buf)))
			continue;

		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
		if (err < 0)
			seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
		else
			seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_dpcd);

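/*
 * Report the panel power sequencing and backlight delays currently used by
 * the driver for this eDP connector.
 */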
static int i915_panel_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	seq_printf(m, "Panel power up delay: %d\n",
		   intel_dp->panel_power_up_delay);
	seq_printf(m, "Panel power down delay: %d\n",
		   intel_dp->panel_power_down_delay);
	seq_printf(m, "Backlight on delay: %d\n",
		   intel_dp->backlight_on_delay);
	seq_printf(m, "Backlight off delay: %d\n",
		   intel_dp->backlight_off_delay);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_panel);

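/*
 * Print whether the connected sink is HDCP capable. Only HDCP 1.4 probing
 * is wired up here; a connector without an HDCP shim cannot support HDCP
 * and reports -EINVAL.
 */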
static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_connector *intel_connector = to_intel_connector(connector);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	/* Connectors without an HDCP shim do not support HDCP at all */
	if (!intel_connector->hdcp.shim)
		return -EINVAL;

	seq_printf(m, "%s:%d HDCP version: ", connector->name,
		   connector->base.id);
	seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
		   "None" : "HDCP1.4");
	seq_puts(m, "\n");

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);

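/*
 * Show DSC and FEC support for the connector's current CRTC, taking the
 * connection and CRTC locks and retrying on -EDEADLK via the usual
 * drm_modeset_backoff() dance.
 */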
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret = 0;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		try_again = false;
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			ret = -EINTR;
			break;
		}
		crtc = connector->state->crtc;
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   yesno(crtc_state->dsc_params.compression_enable));
		if (intel_dp->dsc_dpcd)
			seq_printf(m, "DSC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

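/*
 * Write handler that latches a DSC force-enable flag, applied on the next
 * modeset. A hedged usage sketch, assuming debugfs is mounted at
 * /sys/kernel/debug and the connector directory name matches your topology:
 *
 *   echo 1 > /sys/kernel/debug/dri/0/DP-1/i915_dsc_fec_support
 */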
static ssize_t i915_dsc_fec_support_write(struct file *file,
					  const char __user *ubuf,
					  size_t len, loff_t *offp)
{
	bool dsc_enable = false;
	int ret;
	struct drm_connector *connector =
		((struct seq_file *)file->private_data)->private;
	struct intel_encoder *encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	if (len == 0)
		return 0;

	DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
			 len);

	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
	if (ret < 0)
		return ret;

	DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
			 (dsc_enable) ? "true" : "false");
	intel_dp->force_dsc_en = dsc_enable;

	*offp += len;
	return len;
}

static int i915_dsc_fec_support_open(struct inode *inode,
				     struct file *file)
{
	return single_open(file, i915_dsc_fec_support_show,
			   inode->i_private);
}

static const struct file_operations i915_dsc_fec_support_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_fec_support_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_fec_support_write
};

/**
 * i915_debugfs_connector_add - add i915 specific connector debugfs files
 * @connector: pointer to a registered drm_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 *
 * Returns 0 on success, negative error codes on error.
 */
int i915_debugfs_connector_add(struct drm_connector *connector)
{
	struct dentry *root = connector->debugfs_entry;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);

	/* The connector must have been registered beforehand. */
	if (!root)
		return -ENODEV;

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_dpcd", S_IRUGO, root,
				    connector, &i915_dpcd_fops);

	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
				    connector, &i915_panel_fops);
		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
				    connector, &i915_psr_sink_status_fops);
	}

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
				    connector, &i915_hdcp_sink_capability_fops);
	}

	if (INTEL_GEN(dev_priv) >= 10 &&
	    (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
				    connector, &i915_dsc_fec_support_fops);

	return 0;
}