/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/debugfs.h>
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include "intel_drv.h"
#include "intel_guc_submission.h"

#include "i915_reset.h"

static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
        return to_i915(node->minor->dev);
}

static int i915_capabilities(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        const struct intel_device_info *info = INTEL_INFO(dev_priv);
        struct drm_printer p = drm_seq_file_printer(m);

        seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
        seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
        seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

        intel_device_info_dump_flags(info, &p);
        intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
        intel_driver_caps_print(&dev_priv->caps, &p);

        kernel_param_lock(THIS_MODULE);
        i915_params_dump(&i915_modparams, &p);
        kernel_param_unlock(THIS_MODULE);

        return 0;
}

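/* One-character status flags shown at the start of each describe_obj() line. */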
static char get_active_flag(struct drm_i915_gem_object *obj)
{
        return i915_gem_object_is_active(obj) ? '*' : ' ';
}

static char get_pin_flag(struct drm_i915_gem_object *obj)
{
        return obj->pin_global ? 'p' : ' ';
}

static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
        switch (i915_gem_object_get_tiling(obj)) {
        default:
        case I915_TILING_NONE: return ' ';
        case I915_TILING_X: return 'X';
        case I915_TILING_Y: return 'Y';
        }
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
        return obj->userfault_count ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
        return obj->mm.mapping ? 'M' : ' ';
}

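/* Total GGTT footprint: sum the allocated nodes of every GGTT VMA of the object. */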
static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
{
        u64 size = 0;
        struct i915_vma *vma;

        for_each_ggtt_vma(vma, obj) {
                if (drm_mm_node_allocated(&vma->node))
                        size += vma->node.size;
        }

        return size;
}

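/*
 * Render a GTT page-size mask as text. A single known size returns a string
 * literal; a mixed mask is assembled into the caller-supplied buffer.
 */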
static const char *
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
{
        size_t x = 0;

        switch (page_sizes) {
        case 0:
                return "";
        case I915_GTT_PAGE_SIZE_4K:
                return "4K";
        case I915_GTT_PAGE_SIZE_64K:
                return "64K";
        case I915_GTT_PAGE_SIZE_2M:
                return "2M";
        default:
                if (!buf)
                        return "M";

                if (page_sizes & I915_GTT_PAGE_SIZE_2M)
                        x += snprintf(buf + x, len - x, "2M, ");
                if (page_sizes & I915_GTT_PAGE_SIZE_64K)
                        x += snprintf(buf + x, len - x, "64K, ");
                if (page_sizes & I915_GTT_PAGE_SIZE_4K)
                        x += snprintf(buf + x, len - x, "4K, ");
                buf[x - 2] = '\0';

                return buf;
        }
}

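/*
 * Emit a one-line summary of a GEM object: status flags, size, read/write
 * domains, cache level, and every VMA binding. Caller holds struct_mutex.
 */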
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct intel_engine_cs *engine;
        struct i915_vma *vma;
        unsigned int frontbuffer_bits;
        int pin_count = 0;

        lockdep_assert_held(&obj->base.dev->struct_mutex);

        seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
                   &obj->base,
                   get_active_flag(obj),
                   get_pin_flag(obj),
                   get_tiling_flag(obj),
                   get_global_flag(obj),
                   get_pin_mapped_flag(obj),
                   obj->base.size / 1024,
                   obj->read_domains,
                   obj->write_domain,
                   i915_cache_level_str(dev_priv, obj->cache_level),
                   obj->mm.dirty ? " dirty" : "",
                   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
        if (obj->base.name)
                seq_printf(m, " (name: %d)", obj->base.name);
        list_for_each_entry(vma, &obj->vma_list, obj_link) {
                if (i915_vma_is_pinned(vma))
                        pin_count++;
        }
        seq_printf(m, " (pinned x %d)", pin_count);
        if (obj->pin_global)
                seq_printf(m, " (global)");
        list_for_each_entry(vma, &obj->vma_list, obj_link) {
                if (!drm_mm_node_allocated(&vma->node))
                        continue;

                seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
                           i915_vma_is_ggtt(vma) ? "g" : "pp",
                           vma->node.start, vma->node.size,
                           stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
                if (i915_vma_is_ggtt(vma)) {
                        switch (vma->ggtt_view.type) {
                        case I915_GGTT_VIEW_NORMAL:
                                seq_puts(m, ", normal");
                                break;

                        case I915_GGTT_VIEW_PARTIAL:
                                seq_printf(m, ", partial [%08llx+%x]",
                                           vma->ggtt_view.partial.offset << PAGE_SHIFT,
                                           vma->ggtt_view.partial.size << PAGE_SHIFT);
                                break;

                        case I915_GGTT_VIEW_ROTATED:
                                seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
                                           vma->ggtt_view.rotated.plane[0].width,
                                           vma->ggtt_view.rotated.plane[0].height,
                                           vma->ggtt_view.rotated.plane[0].stride,
                                           vma->ggtt_view.rotated.plane[0].offset,
                                           vma->ggtt_view.rotated.plane[1].width,
                                           vma->ggtt_view.rotated.plane[1].height,
                                           vma->ggtt_view.rotated.plane[1].stride,
                                           vma->ggtt_view.rotated.plane[1].offset);
                                break;

                        default:
                                MISSING_CASE(vma->ggtt_view.type);
                                break;
                        }
                }
                if (vma->fence)
                        seq_printf(m, " , fence: %d%s",
                                   vma->fence->id,
                                   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
                seq_puts(m, ")");
        }
        if (obj->stolen)
                seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

        engine = i915_gem_object_last_write_engine(obj);
        if (engine)
                seq_printf(m, " (%s)", engine->name);

        frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
        if (frontbuffer_bits)
                seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}

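/* sort() comparator: order objects by start offset within stolen memory. */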
static int obj_rank_by_stolen(const void *A, const void *B)
{
        const struct drm_i915_gem_object *a =
                *(const struct drm_i915_gem_object **)A;
        const struct drm_i915_gem_object *b =
                *(const struct drm_i915_gem_object **)B;

        if (a->stolen->start < b->stolen->start)
                return -1;
        if (a->stolen->start > b->stolen->start)
                return 1;
        return 0;
}

static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_i915_gem_object **objects;
        struct drm_i915_gem_object *obj;
        u64 total_obj_size, total_gtt_size;
        unsigned long total, count, n;
        int ret;

        total = READ_ONCE(dev_priv->mm.object_count);
        objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
        if (!objects)
                return -ENOMEM;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                goto out;

        total_obj_size = total_gtt_size = count = 0;

        spin_lock(&dev_priv->mm.obj_lock);
        list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
                if (count == total)
                        break;

                if (obj->stolen == NULL)
                        continue;

                objects[count++] = obj;
                total_obj_size += obj->base.size;
                total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
        }
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
                if (count == total)
                        break;

                if (obj->stolen == NULL)
                        continue;

                objects[count++] = obj;
                total_obj_size += obj->base.size;
        }
        spin_unlock(&dev_priv->mm.obj_lock);

        sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

        seq_puts(m, "Stolen:\n");
        for (n = 0; n < count; n++) {
                seq_puts(m, "   ");
                describe_obj(m, objects[n]);
                seq_putc(m, '\n');
        }
        seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
                   count, total_obj_size, total_gtt_size);

        mutex_unlock(&dev->struct_mutex);
out:
        kvfree(objects);
        return ret;
}

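/* Accumulator for per-client (or kernel) object statistics; vm filters ppGTT VMAs. */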
struct file_stats {
        struct i915_address_space *vm;
        unsigned long count;
        u64 total, unbound;
        u64 global, shared;
        u64 active, inactive;
        u64 closed;
};

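/* Fold one object's sizes into the file_stats at @data (also an idr_for_each() callback). */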
static int per_file_stats(int id, void *ptr, void *data)
{
        struct drm_i915_gem_object *obj = ptr;
        struct file_stats *stats = data;
        struct i915_vma *vma;

        lockdep_assert_held(&obj->base.dev->struct_mutex);

        stats->count++;
        stats->total += obj->base.size;
        if (!obj->bind_count)
                stats->unbound += obj->base.size;
        if (obj->base.name || obj->base.dma_buf)
                stats->shared += obj->base.size;

        list_for_each_entry(vma, &obj->vma_list, obj_link) {
                if (!drm_mm_node_allocated(&vma->node))
                        continue;

                if (i915_vma_is_ggtt(vma)) {
                        stats->global += vma->node.size;
                } else {
                        if (vma->vm != stats->vm)
                                continue;
                }

                if (i915_vma_is_active(vma))
                        stats->active += vma->node.size;
                else
                        stats->inactive += vma->node.size;

                if (i915_vma_is_closed(vma))
                        stats->closed += vma->node.size;
        }

        return 0;
}

#define print_file_stats(m, name, stats) do { \
        if (stats.count) \
                seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
                           name, \
                           stats.count, \
                           stats.total, \
                           stats.active, \
                           stats.inactive, \
                           stats.global, \
                           stats.shared, \
                           stats.unbound, \
                           stats.closed); \
} while (0)

static void print_batch_pool_stats(struct seq_file *m,
                                   struct drm_i915_private *dev_priv)
{
        struct drm_i915_gem_object *obj;
        struct intel_engine_cs *engine;
        struct file_stats stats = {};
        enum intel_engine_id id;
        int j;

        for_each_engine(engine, dev_priv, id) {
                for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
                        list_for_each_entry(obj,
                                            &engine->batch_pool.cache_list[j],
                                            batch_pool_link)
                                per_file_stats(0, obj, &stats);
                }
        }

        print_file_stats(m, "[k]batch pool", stats);
}

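/* Walk every context, splitting kernel-owned context state from per-client usage. */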
static void print_context_stats(struct seq_file *m,
                                struct drm_i915_private *i915)
{
        struct file_stats kstats = {};
        struct i915_gem_context *ctx;

        list_for_each_entry(ctx, &i915->contexts.list, link) {
                struct intel_engine_cs *engine;
                enum intel_engine_id id;

                for_each_engine(engine, i915, id) {
                        struct intel_context *ce = to_intel_context(ctx, engine);

                        if (ce->state)
                                per_file_stats(0, ce->state->obj, &kstats);
                        if (ce->ring)
                                per_file_stats(0, ce->ring->vma->obj, &kstats);
                }

                if (!IS_ERR_OR_NULL(ctx->file_priv)) {
                        struct file_stats stats = { .vm = &ctx->ppgtt->vm, };
                        struct drm_file *file = ctx->file_priv->file;
                        struct task_struct *task;
                        char name[80];

                        spin_lock(&file->table_lock);
                        idr_for_each(&file->object_idr, per_file_stats, &stats);
                        spin_unlock(&file->table_lock);

                        rcu_read_lock();
                        task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
                        snprintf(name, sizeof(name), "%s/%d",
                                 task ? task->comm : "<unknown>",
                                 ctx->user_handle);
                        rcu_read_unlock();

                        print_file_stats(m, name, stats);
                }
        }

        print_file_stats(m, "[k]contexts", kstats);
}

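/* Summarize GEM object usage device-wide: unbound, bound, purgeable, mapped, huge, display. */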
static int i915_gem_object_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
        u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
        struct drm_i915_gem_object *obj;
        unsigned int page_sizes = 0;
        char buf[80];
        int ret;

        seq_printf(m, "%u objects, %llu bytes\n",
                   dev_priv->mm.object_count,
                   dev_priv->mm.object_memory);

        size = count = 0;
        mapped_size = mapped_count = 0;
        purgeable_size = purgeable_count = 0;
        huge_size = huge_count = 0;

        spin_lock(&dev_priv->mm.obj_lock);
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
                size += obj->base.size;
                ++count;

                if (obj->mm.madv == I915_MADV_DONTNEED) {
                        purgeable_size += obj->base.size;
                        ++purgeable_count;
                }

                if (obj->mm.mapping) {
                        mapped_count++;
                        mapped_size += obj->base.size;
                }

                if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
                        huge_count++;
                        huge_size += obj->base.size;
                        page_sizes |= obj->mm.page_sizes.sg;
                }
        }
        seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

        size = count = dpy_size = dpy_count = 0;
        list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
                size += obj->base.size;
                ++count;

                if (obj->pin_global) {
                        dpy_size += obj->base.size;
                        ++dpy_count;
                }

                if (obj->mm.madv == I915_MADV_DONTNEED) {
                        purgeable_size += obj->base.size;
                        ++purgeable_count;
                }

                if (obj->mm.mapping) {
                        mapped_count++;
                        mapped_size += obj->base.size;
                }

                if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
                        huge_count++;
                        huge_size += obj->base.size;
                        page_sizes |= obj->mm.page_sizes.sg;
                }
        }
        spin_unlock(&dev_priv->mm.obj_lock);

        seq_printf(m, "%u bound objects, %llu bytes\n",
                   count, size);
        seq_printf(m, "%u purgeable objects, %llu bytes\n",
                   purgeable_count, purgeable_size);
        seq_printf(m, "%u mapped objects, %llu bytes\n",
                   mapped_count, mapped_size);
        seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
                   huge_count,
                   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
                   huge_size);
        seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
                   dpy_count, dpy_size);

        seq_printf(m, "%llu [%pa] gtt total\n",
                   ggtt->vm.total, &ggtt->mappable_end);
        seq_printf(m, "Supported page sizes: %s\n",
                   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
                                        buf, sizeof(buf)));

        seq_putc(m, '\n');

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        print_batch_pool_stats(m, dev_priv);
        print_context_stats(m, dev_priv);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = m->private;
        struct drm_i915_private *dev_priv = node_to_i915(node);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_i915_gem_object **objects;
        struct drm_i915_gem_object *obj;
        u64 total_obj_size, total_gtt_size;
        unsigned long nobject, n;
        int count, ret;

        nobject = READ_ONCE(dev_priv->mm.object_count);
        objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
        if (!objects)
                return -ENOMEM;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret) {
                /* free the snapshot array instead of leaking it on error */
                kvfree(objects);
                return ret;
        }

        count = 0;
        spin_lock(&dev_priv->mm.obj_lock);
        list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
                objects[count++] = obj;
                if (count == nobject)
                        break;
        }
        spin_unlock(&dev_priv->mm.obj_lock);

        total_obj_size = total_gtt_size = 0;
        for (n = 0; n < count; n++) {
                obj = objects[n];

                seq_puts(m, "   ");
                describe_obj(m, obj);
                seq_putc(m, '\n');
                total_obj_size += obj->base.size;
                total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
        }

        mutex_unlock(&dev->struct_mutex);

        seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
                   count, total_obj_size, total_gtt_size);
        kvfree(objects);

        return 0;
}

static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_i915_gem_object *obj;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int total = 0;
        int ret, j;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        for_each_engine(engine, dev_priv, id) {
                for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
                        int count;

                        count = 0;
                        list_for_each_entry(obj,
                                            &engine->batch_pool.cache_list[j],
                                            batch_pool_link)
                                count++;
                        seq_printf(m, "%s cache[%d]: %d objects\n",
                                   engine->name, j, count);

                        list_for_each_entry(obj,
                                            &engine->batch_pool.cache_list[j],
                                            batch_pool_link) {
                                seq_puts(m, "   ");
                                describe_obj(m, obj);
                                seq_putc(m, '\n');
                        }

                        total += count;
                }
        }

        seq_printf(m, "total: %d\n", total);

        mutex_unlock(&dev->struct_mutex);

        return 0;
}

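/* Display-engine interrupt registers shared by the gen8 and gen11 paths below. */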
static void gen8_display_interrupt_info(struct seq_file *m)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        int pipe;

        for_each_pipe(dev_priv, pipe) {
                enum intel_display_power_domain power_domain;
                intel_wakeref_t wakeref;

                power_domain = POWER_DOMAIN_PIPE(pipe);
                wakeref = intel_display_power_get_if_enabled(dev_priv,
                                                             power_domain);
                if (!wakeref) {
                        seq_printf(m, "Pipe %c power disabled\n",
                                   pipe_name(pipe));
                        continue;
                }
                seq_printf(m, "Pipe %c IMR:\t%08x\n",
                           pipe_name(pipe),
                           I915_READ(GEN8_DE_PIPE_IMR(pipe)));
                seq_printf(m, "Pipe %c IIR:\t%08x\n",
                           pipe_name(pipe),
                           I915_READ(GEN8_DE_PIPE_IIR(pipe)));
                seq_printf(m, "Pipe %c IER:\t%08x\n",
                           pipe_name(pipe),
                           I915_READ(GEN8_DE_PIPE_IER(pipe)));

                intel_display_power_put(dev_priv, power_domain, wakeref);
        }

        seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
                   I915_READ(GEN8_DE_PORT_IMR));
        seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
                   I915_READ(GEN8_DE_PORT_IIR));
        seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
                   I915_READ(GEN8_DE_PORT_IER));

        seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
                   I915_READ(GEN8_DE_MISC_IMR));
        seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
                   I915_READ(GEN8_DE_MISC_IIR));
        seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
                   I915_READ(GEN8_DE_MISC_IER));

        seq_printf(m, "PCU interrupt mask:\t%08x\n",
                   I915_READ(GEN8_PCU_IMR));
        seq_printf(m, "PCU interrupt identity:\t%08x\n",
                   I915_READ(GEN8_PCU_IIR));
        seq_printf(m, "PCU interrupt enable:\t%08x\n",
                   I915_READ(GEN8_PCU_IER));
}

static int i915_interrupt_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        intel_wakeref_t wakeref;
        int i, pipe;

        wakeref = intel_runtime_pm_get(dev_priv);

        if (IS_CHERRYVIEW(dev_priv)) {
                intel_wakeref_t pref;

                seq_printf(m, "Master Interrupt Control:\t%08x\n",
                           I915_READ(GEN8_MASTER_IRQ));

                seq_printf(m, "Display IER:\t%08x\n",
                           I915_READ(VLV_IER));
                seq_printf(m, "Display IIR:\t%08x\n",
                           I915_READ(VLV_IIR));
                seq_printf(m, "Display IIR_RW:\t%08x\n",
                           I915_READ(VLV_IIR_RW));
                seq_printf(m, "Display IMR:\t%08x\n",
                           I915_READ(VLV_IMR));
                for_each_pipe(dev_priv, pipe) {
                        enum intel_display_power_domain power_domain;

                        power_domain = POWER_DOMAIN_PIPE(pipe);
                        pref = intel_display_power_get_if_enabled(dev_priv,
                                                                  power_domain);
                        if (!pref) {
                                seq_printf(m, "Pipe %c power disabled\n",
                                           pipe_name(pipe));
                                continue;
                        }

                        seq_printf(m, "Pipe %c stat:\t%08x\n",
                                   pipe_name(pipe),
                                   I915_READ(PIPESTAT(pipe)));

                        intel_display_power_put(dev_priv, power_domain, pref);
                }

                pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
                seq_printf(m, "Port hotplug:\t%08x\n",
                           I915_READ(PORT_HOTPLUG_EN));
                seq_printf(m, "DPFLIPSTAT:\t%08x\n",
                           I915_READ(VLV_DPFLIPSTAT));
                seq_printf(m, "DPINVGTT:\t%08x\n",
                           I915_READ(DPINVGTT));
                intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

                for (i = 0; i < 4; i++) {
                        seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IMR(i)));
                        seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IIR(i)));
                        seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IER(i)));
                }

                seq_printf(m, "PCU interrupt mask:\t%08x\n",
                           I915_READ(GEN8_PCU_IMR));
                seq_printf(m, "PCU interrupt identity:\t%08x\n",
                           I915_READ(GEN8_PCU_IIR));
                seq_printf(m, "PCU interrupt enable:\t%08x\n",
                           I915_READ(GEN8_PCU_IER));
        } else if (INTEL_GEN(dev_priv) >= 11) {
                seq_printf(m, "Master Interrupt Control: %08x\n",
                           I915_READ(GEN11_GFX_MSTR_IRQ));

                seq_printf(m, "Render/Copy Intr Enable: %08x\n",
                           I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
                seq_printf(m, "VCS/VECS Intr Enable: %08x\n",
                           I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
                seq_printf(m, "GUC/SG Intr Enable:\t %08x\n",
                           I915_READ(GEN11_GUC_SG_INTR_ENABLE));
                seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
                           I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
                seq_printf(m, "Crypto Intr Enable:\t %08x\n",
                           I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
                seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n",
                           I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));

                seq_printf(m, "Display Interrupt Control:\t%08x\n",
                           I915_READ(GEN11_DISPLAY_INT_CTL));

                gen8_display_interrupt_info(m);
        } else if (INTEL_GEN(dev_priv) >= 8) {
                seq_printf(m, "Master Interrupt Control:\t%08x\n",
                           I915_READ(GEN8_MASTER_IRQ));

                for (i = 0; i < 4; i++) {
                        seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IMR(i)));
                        seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IIR(i)));
                        seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
                                   i, I915_READ(GEN8_GT_IER(i)));
                }

                gen8_display_interrupt_info(m);
        } else if (IS_VALLEYVIEW(dev_priv)) {
                seq_printf(m, "Display IER:\t%08x\n",
                           I915_READ(VLV_IER));
                seq_printf(m, "Display IIR:\t%08x\n",
                           I915_READ(VLV_IIR));
                seq_printf(m, "Display IIR_RW:\t%08x\n",
                           I915_READ(VLV_IIR_RW));
                seq_printf(m, "Display IMR:\t%08x\n",
                           I915_READ(VLV_IMR));
                for_each_pipe(dev_priv, pipe) {
                        enum intel_display_power_domain power_domain;
                        intel_wakeref_t pref;

                        power_domain = POWER_DOMAIN_PIPE(pipe);
                        pref = intel_display_power_get_if_enabled(dev_priv,
                                                                  power_domain);
                        if (!pref) {
                                seq_printf(m, "Pipe %c power disabled\n",
                                           pipe_name(pipe));
                                continue;
                        }

                        seq_printf(m, "Pipe %c stat:\t%08x\n",
                                   pipe_name(pipe),
                                   I915_READ(PIPESTAT(pipe)));
                        intel_display_power_put(dev_priv, power_domain, pref);
                }

                seq_printf(m, "Master IER:\t%08x\n",
                           I915_READ(VLV_MASTER_IER));

                seq_printf(m, "Render IER:\t%08x\n",
                           I915_READ(GTIER));
                seq_printf(m, "Render IIR:\t%08x\n",
                           I915_READ(GTIIR));
                seq_printf(m, "Render IMR:\t%08x\n",
                           I915_READ(GTIMR));

                seq_printf(m, "PM IER:\t\t%08x\n",
                           I915_READ(GEN6_PMIER));
                seq_printf(m, "PM IIR:\t\t%08x\n",
                           I915_READ(GEN6_PMIIR));
                seq_printf(m, "PM IMR:\t\t%08x\n",
                           I915_READ(GEN6_PMIMR));

                seq_printf(m, "Port hotplug:\t%08x\n",
                           I915_READ(PORT_HOTPLUG_EN));
                seq_printf(m, "DPFLIPSTAT:\t%08x\n",
                           I915_READ(VLV_DPFLIPSTAT));
                seq_printf(m, "DPINVGTT:\t%08x\n",
                           I915_READ(DPINVGTT));

        } else if (!HAS_PCH_SPLIT(dev_priv)) {
                seq_printf(m, "Interrupt enable: %08x\n",
                           I915_READ(IER));
                seq_printf(m, "Interrupt identity: %08x\n",
                           I915_READ(IIR));
                seq_printf(m, "Interrupt mask: %08x\n",
                           I915_READ(IMR));
                for_each_pipe(dev_priv, pipe)
                        seq_printf(m, "Pipe %c stat: %08x\n",
                                   pipe_name(pipe),
                                   I915_READ(PIPESTAT(pipe)));
        } else {
                seq_printf(m, "North Display Interrupt enable: %08x\n",
                           I915_READ(DEIER));
                seq_printf(m, "North Display Interrupt identity: %08x\n",
                           I915_READ(DEIIR));
                seq_printf(m, "North Display Interrupt mask: %08x\n",
                           I915_READ(DEIMR));
                seq_printf(m, "South Display Interrupt enable: %08x\n",
                           I915_READ(SDEIER));
                seq_printf(m, "South Display Interrupt identity: %08x\n",
                           I915_READ(SDEIIR));
                seq_printf(m, "South Display Interrupt mask: %08x\n",
                           I915_READ(SDEIMR));
                seq_printf(m, "Graphics Interrupt enable: %08x\n",
                           I915_READ(GTIER));
                seq_printf(m, "Graphics Interrupt identity: %08x\n",
                           I915_READ(GTIIR));
                seq_printf(m, "Graphics Interrupt mask: %08x\n",
                           I915_READ(GTIMR));
        }

        if (INTEL_GEN(dev_priv) >= 11) {
                seq_printf(m, "RCS Intr Mask:\t %08x\n",
                           I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
                seq_printf(m, "BCS Intr Mask:\t %08x\n",
                           I915_READ(GEN11_BCS_RSVD_INTR_MASK));
                seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
                           I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
                seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
                           I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
                seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
                           I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
                seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
                           I915_READ(GEN11_GUC_SG_INTR_MASK));
                seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
                           I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
                seq_printf(m, "Crypto Intr Mask:\t %08x\n",
                           I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
                seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
                           I915_READ(GEN11_GUNIT_CSME_INTR_MASK));

        } else if (INTEL_GEN(dev_priv) >= 6) {
                for_each_engine(engine, dev_priv, id) {
                        seq_printf(m,
                                   "Graphics Interrupt mask (%s): %08x\n",
                                   engine->name, I915_READ_IMR(engine));
                }
        }

        intel_runtime_pm_put(dev_priv, wakeref);

        return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        int i, ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
        for (i = 0; i < dev_priv->num_fence_regs; i++) {
                struct i915_vma *vma = dev_priv->fence_regs[i].vma;

                seq_printf(m, "Fence %d, pin count = %d, object = ",
                           i, dev_priv->fence_regs[i].pin_count);
                if (!vma)
                        seq_puts(m, "unused");
                else
                        describe_obj(m, vma->obj);
                seq_putc(m, '\n');
        }

        mutex_unlock(&dev->struct_mutex);
        return 0;
}

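/* GPU error-state capture interface; built only with CONFIG_DRM_I915_CAPTURE_ERROR. */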
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
                              size_t count, loff_t *pos)
{
        struct i915_gpu_state *error;
        ssize_t ret;
        void *buf;

        error = file->private_data;
        if (!error)
                return 0;

        /* Bounce buffer required because of kernfs __user API convenience. */
        buf = kmalloc(count, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
        if (ret <= 0)
                goto out;

        if (!copy_to_user(ubuf, buf, ret))
                *pos += ret;
        else
                ret = -EFAULT;

out:
        kfree(buf);
        return ret;
}

static int gpu_state_release(struct inode *inode, struct file *file)
{
        i915_gpu_state_put(file->private_data);
        return 0;
}

static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
        struct drm_i915_private *i915 = inode->i_private;
        struct i915_gpu_state *gpu;
        intel_wakeref_t wakeref;

        gpu = NULL;
        with_intel_runtime_pm(i915, wakeref)
                gpu = i915_capture_gpu_state(i915);
        if (IS_ERR(gpu))
                return PTR_ERR(gpu);

        file->private_data = gpu;
        return 0;
}

static const struct file_operations i915_gpu_info_fops = {
        .owner = THIS_MODULE,
        .open = i915_gpu_info_open,
        .read = gpu_state_read,
        .llseek = default_llseek,
        .release = gpu_state_release,
};

static ssize_t
i915_error_state_write(struct file *filp,
                       const char __user *ubuf,
                       size_t cnt,
                       loff_t *ppos)
{
        struct i915_gpu_state *error = filp->private_data;

        if (!error)
                return 0;

        DRM_DEBUG_DRIVER("Resetting error state\n");
        i915_reset_error_state(error->i915);

        return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
        struct i915_gpu_state *error;

        error = i915_first_error_state(inode->i_private);
        if (IS_ERR(error))
                return PTR_ERR(error);

        file->private_data = error;
        return 0;
}

static const struct file_operations i915_error_state_fops = {
        .owner = THIS_MODULE,
        .open = i915_error_state_open,
        .read = gpu_state_read,
        .write = i915_error_state_write,
        .llseek = default_llseek,
        .release = gpu_state_release,
};
#endif

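/* Report GPU frequency/RPS state; the register layout differs per generation. */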
static int i915_frequency_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
        intel_wakeref_t wakeref;
        int ret = 0;

        wakeref = intel_runtime_pm_get(dev_priv);

        if (IS_GEN(dev_priv, 5)) {
                u16 rgvswctl = I915_READ16(MEMSWCTL);
                u16 rgvstat = I915_READ16(MEMSTAT_ILK);

                seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
                seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
                seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
                           MEMSTAT_VID_SHIFT);
                seq_printf(m, "Current P-state: %d\n",
                           (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                u32 rpmodectl, freq_sts;

                mutex_lock(&dev_priv->pcu_lock);

                rpmodectl = I915_READ(GEN6_RP_CONTROL);
                seq_printf(m, "Video Turbo Mode: %s\n",
                           yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
                seq_printf(m, "HW control enabled: %s\n",
                           yesno(rpmodectl & GEN6_RP_ENABLE));
                seq_printf(m, "SW control enabled: %s\n",
                           yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
                                 GEN6_RP_MEDIA_SW_MODE));

                freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
                seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
                seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

                seq_printf(m, "actual GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

                seq_printf(m, "current GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->cur_freq));

                seq_printf(m, "max GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->max_freq));

                seq_printf(m, "min GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->min_freq));

                seq_printf(m, "idle GPU freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->idle_freq));

                seq_printf(m,
                           "efficient (RPe) frequency: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->efficient_freq));
                mutex_unlock(&dev_priv->pcu_lock);
        } else if (INTEL_GEN(dev_priv) >= 6) {
                u32 rp_state_limits;
                u32 gt_perf_status;
                u32 rp_state_cap;
                u32 rpmodectl, rpinclimit, rpdeclimit;
                u32 rpstat, cagf, reqf;
                u32 rpupei, rpcurup, rpprevup;
                u32 rpdownei, rpcurdown, rpprevdown;
                u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
                int max_freq;

                rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
                if (IS_GEN9_LP(dev_priv)) {
                        rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
                        gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
                } else {
                        rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
                        gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
                }

                /* RPSTAT1 is in the GT power well */
                intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

                reqf = I915_READ(GEN6_RPNSWREQ);
                if (INTEL_GEN(dev_priv) >= 9)
                        reqf >>= 23;
                else {
                        reqf &= ~GEN6_TURBO_DISABLE;
                        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                                reqf >>= 24;
                        else
                                reqf >>= 25;
                }
                reqf = intel_gpu_freq(dev_priv, reqf);

                rpmodectl = I915_READ(GEN6_RP_CONTROL);
                rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
                rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

                rpstat = I915_READ(GEN6_RPSTAT1);
                rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
                rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
                rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
                rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
                rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
                rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
                cagf = intel_gpu_freq(dev_priv,
                                      intel_get_cagf(dev_priv, rpstat));

                intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

                if (INTEL_GEN(dev_priv) >= 11) {
                        pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
                        pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
                        /*
                         * The equivalent to the PM ISR & IIR cannot be read
                         * without affecting the current state of the system
                         */
                        pm_isr = 0;
                        pm_iir = 0;
                } else if (INTEL_GEN(dev_priv) >= 8) {
                        pm_ier = I915_READ(GEN8_GT_IER(2));
                        pm_imr = I915_READ(GEN8_GT_IMR(2));
                        pm_isr = I915_READ(GEN8_GT_ISR(2));
                        pm_iir = I915_READ(GEN8_GT_IIR(2));
                } else {
                        pm_ier = I915_READ(GEN6_PMIER);
                        pm_imr = I915_READ(GEN6_PMIMR);
                        pm_isr = I915_READ(GEN6_PMISR);
                        pm_iir = I915_READ(GEN6_PMIIR);
                }
                pm_mask = I915_READ(GEN6_PMINTRMSK);

                seq_printf(m, "Video Turbo Mode: %s\n",
                           yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
                seq_printf(m, "HW control enabled: %s\n",
                           yesno(rpmodectl & GEN6_RP_ENABLE));
                seq_printf(m, "SW control enabled: %s\n",
                           yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
                                 GEN6_RP_MEDIA_SW_MODE));

                seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
                           pm_ier, pm_imr, pm_mask);
                if (INTEL_GEN(dev_priv) <= 10)
                        seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
                                   pm_isr, pm_iir);
                seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
                           rps->pm_intrmsk_mbz);
                seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
                seq_printf(m, "Render p-state ratio: %d\n",
                           (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
                seq_printf(m, "Render p-state VID: %d\n",
                           gt_perf_status & 0xff);
                seq_printf(m, "Render p-state limit: %d\n",
                           rp_state_limits & 0xff);
                seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
                seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
                seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
                seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
                seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
                seq_printf(m, "CAGF: %dMHz\n", cagf);
                seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
                           rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
                seq_printf(m, "RP CUR UP: %d (%dus)\n",
                           rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
                seq_printf(m, "RP PREV UP: %d (%dus)\n",
                           rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
                seq_printf(m, "Up threshold: %d%%\n",
                           rps->power.up_threshold);

                seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
                           rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
                seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
                           rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
                seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
                           rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
                seq_printf(m, "Down threshold: %d%%\n",
                           rps->power.down_threshold);

                max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
                            rp_state_cap >> 16) & 0xff;
                max_freq *= (IS_GEN9_BC(dev_priv) ||
                             INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));

                max_freq = (rp_state_cap & 0xff00) >> 8;
                max_freq *= (IS_GEN9_BC(dev_priv) ||
                             INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));

                max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
                            rp_state_cap >> 0) & 0xff;
                max_freq *= (IS_GEN9_BC(dev_priv) ||
                             INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, max_freq));
                seq_printf(m, "Max overclocked frequency: %dMHz\n",
                           intel_gpu_freq(dev_priv, rps->max_freq));

                seq_printf(m, "Current freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->cur_freq));
                seq_printf(m, "Actual freq: %d MHz\n", cagf);
                seq_printf(m, "Idle freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->idle_freq));
                seq_printf(m, "Min freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->min_freq));
                seq_printf(m, "Boost freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->boost_freq));
                seq_printf(m, "Max freq: %d MHz\n",
                           intel_gpu_freq(dev_priv, rps->max_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001230 seq_printf(m,
1231 "Efficient (RPe) frequency: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001232 intel_gpu_freq(dev_priv, rps->efficient_freq));
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001233 } else {
Damien Lespiau267f0c92013-06-24 22:59:48 +01001234 seq_puts(m, "no P-state info available\n");
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001235 }
Jesse Barnesf97108d2010-01-29 11:27:07 -08001236
Ville Syrjälä49cd97a2017-02-07 20:33:45 +02001237 seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
Mika Kahola1170f282015-09-25 14:00:32 +03001238 seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
1239 seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
1240
Chris Wilsona0371212019-01-14 14:21:14 +00001241 intel_runtime_pm_put(dev_priv, wakeref);
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02001242 return ret;
Jesse Barnesf97108d2010-01-29 11:27:07 -08001243}
1244
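/*
 * Dump the cached INSTDONE state: the top-level instruction-done word,
 * the common slice status on gen4+, and the per-(slice, subslice)
 * sampler/row breakdown available from gen7 onwards.
 */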
Ben Widawskyd6369512016-09-20 16:54:32 +03001245static void i915_instdone_info(struct drm_i915_private *dev_priv,
1246 struct seq_file *m,
1247 struct intel_instdone *instdone)
1248{
Ben Widawskyf9e61372016-09-20 16:54:33 +03001249 int slice;
1250 int subslice;
1251
Ben Widawskyd6369512016-09-20 16:54:32 +03001252 seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1253 instdone->instdone);
1254
1255 if (INTEL_GEN(dev_priv) <= 3)
1256 return;
1257
1258 seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1259 instdone->slice_common);
1260
1261 if (INTEL_GEN(dev_priv) <= 6)
1262 return;
1263
Ben Widawskyf9e61372016-09-20 16:54:33 +03001264 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1265 seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1266 slice, subslice, instdone->sampler[slice][subslice]);
1267
1268 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1269 seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1270 slice, subslice, instdone->row[slice][subslice]);
Ben Widawskyd6369512016-09-20 16:54:32 +03001271}
1272
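/*
 * Hangcheck status: wedged/reset-backoff flags, whether the hangcheck
 * timer is armed, and a per-engine snapshot of seqno, waiters and ACTHD
 * (plus INSTDONE detail for the render engine) to help spot a hung engine.
 */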
Chris Wilsonf6544492015-01-26 18:03:04 +02001273static int i915_hangcheck_info(struct seq_file *m, void *unused)
1274{
David Weinehall36cdd012016-08-22 13:59:31 +03001275 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001276 struct intel_engine_cs *engine;
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00001277 u64 acthd[I915_NUM_ENGINES];
1278 u32 seqno[I915_NUM_ENGINES];
Ben Widawskyd6369512016-09-20 16:54:32 +03001279 struct intel_instdone instdone;
Chris Wilsona0371212019-01-14 14:21:14 +00001280 intel_wakeref_t wakeref;
Dave Gordonc3232b12016-03-23 18:19:53 +00001281 enum intel_engine_id id;
Chris Wilsonf6544492015-01-26 18:03:04 +02001282
Chris Wilson8af29b02016-09-09 14:11:47 +01001283 if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
Chris Wilson8c185ec2017-03-16 17:13:02 +00001284 seq_puts(m, "Wedged\n");
1285 if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
1286 seq_puts(m, "Reset in progress: struct_mutex backoff\n");
Chris Wilson8af29b02016-09-09 14:11:47 +01001287 if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
Chris Wilson8c185ec2017-03-16 17:13:02 +00001288 seq_puts(m, "Waiter holding struct mutex\n");
Chris Wilson8af29b02016-09-09 14:11:47 +01001289 if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
Chris Wilson8c185ec2017-03-16 17:13:02 +00001290 seq_puts(m, "struct_mutex blocked for reset\n");
Chris Wilson8af29b02016-09-09 14:11:47 +01001291
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00001292 if (!i915_modparams.enable_hangcheck) {
Chris Wilson8c185ec2017-03-16 17:13:02 +00001293 seq_puts(m, "Hangcheck disabled\n");
Chris Wilsonf6544492015-01-26 18:03:04 +02001294 return 0;
1295 }
1296
Chris Wilsond4225a52019-01-14 14:21:23 +00001297 with_intel_runtime_pm(dev_priv, wakeref) {
1298 for_each_engine(engine, dev_priv, id) {
1299 acthd[id] = intel_engine_get_active_head(engine);
1300 seqno[id] = intel_engine_get_seqno(engine);
1301 }
Mika Kuoppalaebbc7542015-02-05 18:41:48 +02001302
Chris Wilsond4225a52019-01-14 14:21:23 +00001303 intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
Mika Kuoppalaebbc7542015-02-05 18:41:48 +02001304 }
1305
Chris Wilson8352aea2017-03-03 09:00:56 +00001306 if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
1307 seq_printf(m, "Hangcheck active, timer fires in %dms\n",
Chris Wilsonf6544492015-01-26 18:03:04 +02001308 jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
1309 jiffies));
Chris Wilson8352aea2017-03-03 09:00:56 +00001310 else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
1311 seq_puts(m, "Hangcheck active, work pending\n");
1312 else
1313 seq_puts(m, "Hangcheck inactive\n");
Chris Wilsonf6544492015-01-26 18:03:04 +02001314
Chris Wilsonf73b5672017-03-02 15:03:56 +00001315 seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));
1316
Akash Goel3b3f1652016-10-13 22:44:48 +05301317 for_each_engine(engine, dev_priv, id) {
Chris Wilson33f53712016-10-04 21:11:32 +01001318 struct intel_breadcrumbs *b = &engine->breadcrumbs;
1319 struct rb_node *rb;
1320
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001321 seq_printf(m, "%s:\n", engine->name);
Chris Wilsoneb8d0f52019-01-25 13:22:28 +00001322 seq_printf(m, "\tseqno = %x [current %x, last %x], %dms ago\n",
Chris Wilsoncb399ea2016-11-01 10:03:16 +00001323 engine->hangcheck.seqno, seqno[id],
Chris Wilsoneb8d0f52019-01-25 13:22:28 +00001324 intel_engine_last_submit(engine),
1325 jiffies_to_msecs(jiffies -
1326 engine->hangcheck.action_timestamp));
1327 seq_printf(m, "\twaiters? %s, fake irq active? %s\n",
Chris Wilson83348ba2016-08-09 17:47:51 +01001328 yesno(intel_engine_has_waiter(engine)),
1329 yesno(test_bit(engine->id,
Chris Wilsoneb8d0f52019-01-25 13:22:28 +00001330 &dev_priv->gpu_error.missed_irq_rings)));
Mika Kuoppala3fe3b032016-11-18 15:09:04 +02001331
Chris Wilson61d3dc72017-03-03 19:08:24 +00001332 spin_lock_irq(&b->rb_lock);
Chris Wilson33f53712016-10-04 21:11:32 +01001333 for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
Geliang Tangf802cf72016-12-19 22:43:49 +08001334 struct intel_wait *w = rb_entry(rb, typeof(*w), node);
Chris Wilson33f53712016-10-04 21:11:32 +01001335
1336 seq_printf(m, "\t%s [%d] waiting for %x\n",
1337 w->tsk->comm, w->tsk->pid, w->seqno);
1338 }
Chris Wilson61d3dc72017-03-03 19:08:24 +00001339 spin_unlock_irq(&b->rb_lock);
Chris Wilson33f53712016-10-04 21:11:32 +01001340
Chris Wilsonf6544492015-01-26 18:03:04 +02001341 seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001342 (long long)engine->hangcheck.acthd,
Dave Gordonc3232b12016-03-23 18:19:53 +00001343 (long long)acthd[id]);
Mika Kuoppala61642ff2015-12-01 17:56:12 +02001344
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001345 if (engine->id == RCS) {
Ben Widawskyd6369512016-09-20 16:54:32 +03001346 seq_puts(m, "\tinstdone read =\n");
Mika Kuoppala61642ff2015-12-01 17:56:12 +02001347
Ben Widawskyd6369512016-09-20 16:54:32 +03001348 i915_instdone_info(dev_priv, m, &instdone);
Mika Kuoppala61642ff2015-12-01 17:56:12 +02001349
Ben Widawskyd6369512016-09-20 16:54:32 +03001350 seq_puts(m, "\tinstdone accu =\n");
Mika Kuoppala61642ff2015-12-01 17:56:12 +02001351
Ben Widawskyd6369512016-09-20 16:54:32 +03001352 i915_instdone_info(dev_priv, m,
1353 &engine->hangcheck.instdone);
Mika Kuoppala61642ff2015-12-01 17:56:12 +02001354 }
Chris Wilsonf6544492015-01-26 18:03:04 +02001355 }
1356
1357 return 0;
1358}
1359
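/* Report the global GPU reset count and the per-engine reset counts. */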
Michel Thierry061d06a2017-06-20 10:57:49 +01001360static int i915_reset_info(struct seq_file *m, void *unused)
1361{
1362 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1363 struct i915_gpu_error *error = &dev_priv->gpu_error;
1364 struct intel_engine_cs *engine;
1365 enum intel_engine_id id;
1366
1367 seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1368
1369 for_each_engine(engine, dev_priv, id) {
1370 seq_printf(m, "%s = %u\n", engine->name,
1371 i915_reset_engine_count(error, engine));
1372 }
1373
1374 return 0;
1375}
1376
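/*
 * Ironlake DRPC: decode MEMMODECTL/RSTDBYCTL into the boost frequency,
 * P-state limits and the current render-standby (RSx) state.
 */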
Ben Widawsky4d855292011-12-12 19:34:16 -08001377static int ironlake_drpc_info(struct seq_file *m)
Jesse Barnesf97108d2010-01-29 11:27:07 -08001378{
David Weinehall36cdd012016-08-22 13:59:31 +03001379 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Ben Widawsky616fdb52011-10-05 11:44:54 -07001380 u32 rgvmodectl, rstdbyctl;
1381 u16 crstandvid;
Ben Widawsky616fdb52011-10-05 11:44:54 -07001382
Ben Widawsky616fdb52011-10-05 11:44:54 -07001383 rgvmodectl = I915_READ(MEMMODECTL);
1384 rstdbyctl = I915_READ(RSTDBYCTL);
1385 crstandvid = I915_READ16(CRSTANDVID);
1386
Jani Nikula742f4912015-09-03 11:16:09 +03001387 seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001388 seq_printf(m, "Boost freq: %d\n",
1389 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1390 MEMMODE_BOOST_FREQ_SHIFT);
1391 seq_printf(m, "HW control enabled: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001392 yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001393 seq_printf(m, "SW control enabled: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001394 yesno(rgvmodectl & MEMMODE_SWMODE_EN));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001395 seq_printf(m, "Gated voltage change: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001396 yesno(rgvmodectl & MEMMODE_RCLK_GATE));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001397 seq_printf(m, "Starting frequency: P%d\n",
1398 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
Jesse Barnes7648fa92010-05-20 14:28:11 -07001399 seq_printf(m, "Max P-state: P%d\n",
Jesse Barnesf97108d2010-01-29 11:27:07 -08001400 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
Jesse Barnes7648fa92010-05-20 14:28:11 -07001401 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1402 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1403 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1404 seq_printf(m, "Render standby enabled: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001405 yesno(!(rstdbyctl & RCX_SW_EXIT)));
Damien Lespiau267f0c92013-06-24 22:59:48 +01001406 seq_puts(m, "Current RS state: ");
Jesse Barnes88271da2011-01-05 12:01:24 -08001407 switch (rstdbyctl & RSX_STATUS_MASK) {
1408 case RSX_STATUS_ON:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001409 seq_puts(m, "on\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001410 break;
1411 case RSX_STATUS_RC1:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001412 seq_puts(m, "RC1\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001413 break;
1414 case RSX_STATUS_RC1E:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001415 seq_puts(m, "RC1E\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001416 break;
1417 case RSX_STATUS_RS1:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001418 seq_puts(m, "RS1\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001419 break;
1420 case RSX_STATUS_RS2:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001421 seq_puts(m, "RS2 (RC6)\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001422 break;
1423 case RSX_STATUS_RS3:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001424 seq_puts(m, "RS3 (RC6+)\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001425 break;
1426 default:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001427 seq_puts(m, "unknown\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001428 break;
1429 }
Jesse Barnesf97108d2010-01-29 11:27:07 -08001430
1431 return 0;
1432}
1433
Mika Kuoppalaf65367b2015-01-16 11:34:42 +02001434static int i915_forcewake_domains(struct seq_file *m, void *data)
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001435{
Chris Wilson233ebf52017-03-23 10:19:44 +00001436 struct drm_i915_private *i915 = node_to_i915(m->private);
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001437 struct intel_uncore_forcewake_domain *fw_domain;
Chris Wilsond2dc94b2017-03-23 10:19:41 +00001438 unsigned int tmp;
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001439
Chris Wilsond7a133d2017-09-07 14:44:41 +01001440 seq_printf(m, "user.bypass_count = %u\n",
1441 i915->uncore.user_forcewake.count);
1442
Chris Wilson233ebf52017-03-23 10:19:44 +00001443 for_each_fw_domain(fw_domain, i915, tmp)
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001444 seq_printf(m, "%s.wake_count = %u\n",
Tvrtko Ursulin33c582c2016-04-07 17:04:33 +01001445 intel_uncore_forcewake_domain_to_str(fw_domain->id),
Chris Wilson233ebf52017-03-23 10:19:44 +00001446 READ_ONCE(fw_domain->wake_count));
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001447
1448 return 0;
1449}
1450
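/* Print one RC6 residency register, raw and converted to microseconds. */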
Mika Kuoppala13628772017-03-15 17:43:02 +02001451static void print_rc6_res(struct seq_file *m,
1452 const char *title,
1453 const i915_reg_t reg)
1454{
1455 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1456
1457 seq_printf(m, "%s %u (%llu us)\n",
1458 title, I915_READ(reg),
1459 intel_rc6_residency_us(dev_priv, reg));
1460}
1461
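/*
 * Valleyview/Cherryview DRPC: RC6 enablement, render/media power-well
 * status and the RC6 residencies accumulated since boot.
 */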
Deepak S669ab5a2014-01-10 15:18:26 +05301462static int vlv_drpc_info(struct seq_file *m)
1463{
David Weinehall36cdd012016-08-22 13:59:31 +03001464 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Sagar Arun Kamble0d6fc922017-10-10 22:30:02 +01001465 u32 rcctl1, pw_status;
Deepak S669ab5a2014-01-10 15:18:26 +05301466
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001467 pw_status = I915_READ(VLV_GTLC_PW_STATUS);
Deepak S669ab5a2014-01-10 15:18:26 +05301468 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1469
Deepak S669ab5a2014-01-10 15:18:26 +05301470 seq_printf(m, "RC6 Enabled: %s\n",
1471 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1472 GEN6_RC_CTL_EI_MODE(1))));
1473 seq_printf(m, "Render Power Well: %s\n",
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001474 (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
Deepak S669ab5a2014-01-10 15:18:26 +05301475 seq_printf(m, "Media Power Well: %s\n",
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001476 (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
Deepak S669ab5a2014-01-10 15:18:26 +05301477
Mika Kuoppala13628772017-03-15 17:43:02 +02001478 print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1479 print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
Imre Deak9cc19be2014-04-14 20:24:24 +03001480
Mika Kuoppalaf65367b2015-01-16 11:34:42 +02001481 return i915_forcewake_domains(m, NULL);
Deepak S669ab5a2014-01-10 15:18:26 +05301482}
1483
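/*
 * Gen6+ DRPC: decode GEN6_RC_CONTROL and the GT core status into the
 * current RC state, gen9 power-well gating, RC6 residencies and, up to
 * gen7, the RC6 voltages read back through the PCODE mailbox.
 */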
Ben Widawsky4d855292011-12-12 19:34:16 -08001484static int gen6_drpc_info(struct seq_file *m)
1485{
David Weinehall36cdd012016-08-22 13:59:31 +03001486 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Sagar Arun Kamble960e5462017-10-10 22:29:59 +01001487 u32 gt_core_status, rcctl1, rc6vids = 0;
Akash Goelf2dd7572016-06-27 20:10:01 +05301488 u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
Ben Widawsky4d855292011-12-12 19:34:16 -08001489
Ville Syrjälä75aa3f62015-10-22 15:34:56 +03001490 gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
Chris Wilsoned71f1b2013-07-19 20:36:56 +01001491 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
Ben Widawsky4d855292011-12-12 19:34:16 -08001492
Ben Widawsky4d855292011-12-12 19:34:16 -08001493 rcctl1 = I915_READ(GEN6_RC_CONTROL);
David Weinehall36cdd012016-08-22 13:59:31 +03001494 if (INTEL_GEN(dev_priv) >= 9) {
Akash Goelf2dd7572016-06-27 20:10:01 +05301495 gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
1496 gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
1497 }
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001498
Imre Deak51cc9ad2018-02-08 19:41:02 +02001499 if (INTEL_GEN(dev_priv) <= 7) {
1500 mutex_lock(&dev_priv->pcu_lock);
1501 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
1502 &rc6vids);
1503 mutex_unlock(&dev_priv->pcu_lock);
1504 }
Ben Widawsky4d855292011-12-12 19:34:16 -08001505
Eric Anholtfff24e22012-01-23 16:14:05 -08001506 seq_printf(m, "RC1e Enabled: %s\n",
Ben Widawsky4d855292011-12-12 19:34:16 -08001507 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1508 seq_printf(m, "RC6 Enabled: %s\n",
1509 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
David Weinehall36cdd012016-08-22 13:59:31 +03001510 if (INTEL_GEN(dev_priv) >= 9) {
Akash Goelf2dd7572016-06-27 20:10:01 +05301511 seq_printf(m, "Render Well Gating Enabled: %s\n",
1512 yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
1513 seq_printf(m, "Media Well Gating Enabled: %s\n",
1514 yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
1515 }
Ben Widawsky4d855292011-12-12 19:34:16 -08001516 seq_printf(m, "Deep RC6 Enabled: %s\n",
1517 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1518 seq_printf(m, "Deepest RC6 Enabled: %s\n",
1519 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
Damien Lespiau267f0c92013-06-24 22:59:48 +01001520 seq_puts(m, "Current RC state: ");
Ben Widawsky4d855292011-12-12 19:34:16 -08001521 switch (gt_core_status & GEN6_RCn_MASK) {
1522 case GEN6_RC0:
1523 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
Damien Lespiau267f0c92013-06-24 22:59:48 +01001524 seq_puts(m, "Core Power Down\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001525 else
Damien Lespiau267f0c92013-06-24 22:59:48 +01001526 seq_puts(m, "on\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001527 break;
1528 case GEN6_RC3:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001529 seq_puts(m, "RC3\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001530 break;
1531 case GEN6_RC6:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001532 seq_puts(m, "RC6\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001533 break;
1534 case GEN6_RC7:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001535 seq_puts(m, "RC7\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001536 break;
1537 default:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001538 seq_puts(m, "Unknown\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001539 break;
1540 }
1541
1542 seq_printf(m, "Core Power Down: %s\n",
1543 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
David Weinehall36cdd012016-08-22 13:59:31 +03001544 if (INTEL_GEN(dev_priv) >= 9) {
Akash Goelf2dd7572016-06-27 20:10:01 +05301545 seq_printf(m, "Render Power Well: %s\n",
1546 (gen9_powergate_status &
1547 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
1548 seq_printf(m, "Media Power Well: %s\n",
1549 (gen9_powergate_status &
1550 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
1551 }
Ben Widawskycce66a22012-03-27 18:59:38 -07001552
1553 /* Not exactly sure what this is */
Mika Kuoppala13628772017-03-15 17:43:02 +02001554 print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
1555 GEN6_GT_GFX_RC6_LOCKED);
1556 print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
1557 print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
1558 print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
Ben Widawskycce66a22012-03-27 18:59:38 -07001559
Imre Deak51cc9ad2018-02-08 19:41:02 +02001560 if (INTEL_GEN(dev_priv) <= 7) {
1561 seq_printf(m, "RC6 voltage: %dmV\n",
1562 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1563 seq_printf(m, "RC6+ voltage: %dmV\n",
1564 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1565 seq_printf(m, "RC6++ voltage: %dmV\n",
1566 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1567 }
1568
Akash Goelf2dd7572016-06-27 20:10:01 +05301569 return i915_forcewake_domains(m, NULL);
Ben Widawsky4d855292011-12-12 19:34:16 -08001570}
1571
1572static int i915_drpc_info(struct seq_file *m, void *unused)
1573{
David Weinehall36cdd012016-08-22 13:59:31 +03001574 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00001575 intel_wakeref_t wakeref;
Chris Wilsond4225a52019-01-14 14:21:23 +00001576 int err = -ENODEV;
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001577
Chris Wilsond4225a52019-01-14 14:21:23 +00001578 with_intel_runtime_pm(dev_priv, wakeref) {
1579 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1580 err = vlv_drpc_info(m);
1581 else if (INTEL_GEN(dev_priv) >= 6)
1582 err = gen6_drpc_info(m);
1583 else
1584 err = ironlake_drpc_info(m);
1585 }
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001586
1587 return err;
Ben Widawsky4d855292011-12-12 19:34:16 -08001588}
1589
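/* Expose the frontbuffer-tracking busy and pending-flip bitmasks. */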
Daniel Vetter9a851782015-06-18 10:30:22 +02001590static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1591{
David Weinehall36cdd012016-08-22 13:59:31 +03001592 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Daniel Vetter9a851782015-06-18 10:30:22 +02001593
1594 seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1595 dev_priv->fb_tracking.busy_bits);
1596
1597 seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1598 dev_priv->fb_tracking.flip_bits);
1599
1600 return 0;
1601}
1602
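/*
 * FBC status: whether compression is enabled (and if not, why), and
 * whether the hardware is actively compressing; the status register
 * consulted varies with the platform generation.
 */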
Jesse Barnesb5e50c32010-02-05 12:42:41 -08001603static int i915_fbc_status(struct seq_file *m, void *unused)
1604{
David Weinehall36cdd012016-08-22 13:59:31 +03001605 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilson31388722017-12-20 20:58:48 +00001606 struct intel_fbc *fbc = &dev_priv->fbc;
Chris Wilsona0371212019-01-14 14:21:14 +00001607 intel_wakeref_t wakeref;
Jesse Barnesb5e50c32010-02-05 12:42:41 -08001608
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00001609 if (!HAS_FBC(dev_priv))
1610 return -ENODEV;
Jesse Barnesb5e50c32010-02-05 12:42:41 -08001611
Chris Wilsona0371212019-01-14 14:21:14 +00001612 wakeref = intel_runtime_pm_get(dev_priv);
Chris Wilson31388722017-12-20 20:58:48 +00001613 mutex_lock(&fbc->lock);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001614
Paulo Zanoni0e631ad2015-10-14 17:45:36 -03001615 if (intel_fbc_is_active(dev_priv))
Damien Lespiau267f0c92013-06-24 22:59:48 +01001616 seq_puts(m, "FBC enabled\n");
Paulo Zanoni2e8144a2015-06-12 14:36:20 -03001617 else
Chris Wilson31388722017-12-20 20:58:48 +00001618 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1619
Ville Syrjälä3fd5d1e2017-06-06 15:43:18 +03001620 if (intel_fbc_is_active(dev_priv)) {
1621 u32 mask;
1622
1623 if (INTEL_GEN(dev_priv) >= 8)
1624 mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1625 else if (INTEL_GEN(dev_priv) >= 7)
1626 mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1627 else if (INTEL_GEN(dev_priv) >= 5)
1628 mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1629 else if (IS_G4X(dev_priv))
1630 mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1631 else
1632 mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1633 FBC_STAT_COMPRESSED);
1634
1635 seq_printf(m, "Compressing: %s\n", yesno(mask));
Paulo Zanoni0fc6a9d2016-10-21 13:55:46 -02001636 }
Paulo Zanoni31b9df12015-06-12 14:36:18 -03001637
Chris Wilson31388722017-12-20 20:58:48 +00001638 mutex_unlock(&fbc->lock);
Chris Wilsona0371212019-01-14 14:21:14 +00001639 intel_runtime_pm_put(dev_priv, wakeref);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001640
Jesse Barnesb5e50c32010-02-05 12:42:41 -08001641 return 0;
1642}
1643
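/*
 * debugfs get/set pair for FBC false colour (gen7+): toggles
 * FBC_CTL_FALSE_COLOR in ILK_DPFC_CONTROL so that compressed regions
 * are drawn in a debug colour and can be picked out visually.
 */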
Ville Syrjälä4127dc42017-06-06 15:44:12 +03001644static int i915_fbc_false_color_get(void *data, u64 *val)
Rodrigo Vivida46f932014-08-01 02:04:45 -07001645{
David Weinehall36cdd012016-08-22 13:59:31 +03001646 struct drm_i915_private *dev_priv = data;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001647
David Weinehall36cdd012016-08-22 13:59:31 +03001648 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
Rodrigo Vivida46f932014-08-01 02:04:45 -07001649 return -ENODEV;
1650
Rodrigo Vivida46f932014-08-01 02:04:45 -07001651 *val = dev_priv->fbc.false_color;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001652
1653 return 0;
1654}
1655
Ville Syrjälä4127dc42017-06-06 15:44:12 +03001656static int i915_fbc_false_color_set(void *data, u64 val)
Rodrigo Vivida46f932014-08-01 02:04:45 -07001657{
David Weinehall36cdd012016-08-22 13:59:31 +03001658 struct drm_i915_private *dev_priv = data;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001659 u32 reg;
1660
David Weinehall36cdd012016-08-22 13:59:31 +03001661 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
Rodrigo Vivida46f932014-08-01 02:04:45 -07001662 return -ENODEV;
1663
Paulo Zanoni25ad93f2015-07-02 19:25:10 -03001664 mutex_lock(&dev_priv->fbc.lock);
Rodrigo Vivida46f932014-08-01 02:04:45 -07001665
1666 reg = I915_READ(ILK_DPFC_CONTROL);
1667 dev_priv->fbc.false_color = val;
1668
1669 I915_WRITE(ILK_DPFC_CONTROL, val ?
1670 (reg | FBC_CTL_FALSE_COLOR) :
1671 (reg & ~FBC_CTL_FALSE_COLOR));
1672
Paulo Zanoni25ad93f2015-07-02 19:25:10 -03001673 mutex_unlock(&dev_priv->fbc.lock);
Rodrigo Vivida46f932014-08-01 02:04:45 -07001674 return 0;
1675}
1676
Ville Syrjälä4127dc42017-06-06 15:44:12 +03001677DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
1678 i915_fbc_false_color_get, i915_fbc_false_color_set,
Rodrigo Vivida46f932014-08-01 02:04:45 -07001679 "%llu\n");
1680
Paulo Zanoni92d44622013-05-31 16:33:24 -03001681static int i915_ips_status(struct seq_file *m, void *unused)
1682{
David Weinehall36cdd012016-08-22 13:59:31 +03001683 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00001684 intel_wakeref_t wakeref;
Paulo Zanoni92d44622013-05-31 16:33:24 -03001685
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00001686 if (!HAS_IPS(dev_priv))
1687 return -ENODEV;
Paulo Zanoni92d44622013-05-31 16:33:24 -03001688
Chris Wilsona0371212019-01-14 14:21:14 +00001689 wakeref = intel_runtime_pm_get(dev_priv);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001690
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001691 seq_printf(m, "Enabled by kernel parameter: %s\n",
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00001692 yesno(i915_modparams.enable_ips));
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001693
David Weinehall36cdd012016-08-22 13:59:31 +03001694 if (INTEL_GEN(dev_priv) >= 8) {
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001695 seq_puts(m, "Currently: unknown\n");
1696 } else {
1697 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1698 seq_puts(m, "Currently: enabled\n");
1699 else
1700 seq_puts(m, "Currently: disabled\n");
1701 }
Paulo Zanoni92d44622013-05-31 16:33:24 -03001702
Chris Wilsona0371212019-01-14 14:21:14 +00001703 intel_runtime_pm_put(dev_priv, wakeref);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001704
Paulo Zanoni92d44622013-05-31 16:33:24 -03001705 return 0;
1706}
1707
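/*
 * Self-refresh status: the enable bit to inspect differs per platform,
 * and gen9+ exposes no global SR status (per-plane watermarks instead).
 */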
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001708static int i915_sr_status(struct seq_file *m, void *unused)
1709{
David Weinehall36cdd012016-08-22 13:59:31 +03001710 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00001711 intel_wakeref_t wakeref;
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001712 bool sr_enabled = false;
1713
Chris Wilson0e6e0be2019-01-14 14:21:24 +00001714 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001715
Chris Wilson7342a722017-03-09 14:20:49 +00001716 if (INTEL_GEN(dev_priv) >= 9)
1717 /* no global SR status; inspect per-plane WM */;
1718 else if (HAS_PCH_SPLIT(dev_priv))
Chris Wilson5ba2aaa2010-08-19 18:04:08 +01001719 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
Jani Nikulac0f86832016-12-07 12:13:04 +02001720 else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
David Weinehall36cdd012016-08-22 13:59:31 +03001721 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001722 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
David Weinehall36cdd012016-08-22 13:59:31 +03001723 else if (IS_I915GM(dev_priv))
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001724 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
David Weinehall36cdd012016-08-22 13:59:31 +03001725 else if (IS_PINEVIEW(dev_priv))
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001726 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
David Weinehall36cdd012016-08-22 13:59:31 +03001727 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
Ander Conselvan de Oliveira77b64552015-06-02 14:17:47 +03001728 sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001729
Chris Wilson0e6e0be2019-01-14 14:21:24 +00001730 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001731
Tvrtko Ursulin08c4d7f2016-11-17 12:30:14 +00001732 seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001733
1734 return 0;
1735}
1736
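/* Ironlake EMON readings: GMCH temperature and chipset/GFX power estimates. */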
Jesse Barnes7648fa92010-05-20 14:28:11 -07001737static int i915_emon_status(struct seq_file *m, void *unused)
1738{
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001739 struct drm_i915_private *i915 = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00001740 intel_wakeref_t wakeref;
Chris Wilsonde227ef2010-07-03 07:58:38 +01001741
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001742 if (!IS_GEN(i915, 5))
Chris Wilson582be6b2012-04-30 19:35:02 +01001743 return -ENODEV;
1744
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001745 with_intel_runtime_pm(i915, wakeref) {
1746 unsigned long temp, chipset, gfx;
Jesse Barnes7648fa92010-05-20 14:28:11 -07001747
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001748 temp = i915_mch_val(i915);
1749 chipset = i915_chipset_val(i915);
1750 gfx = i915_gfx_val(i915);
Chris Wilsona0371212019-01-14 14:21:14 +00001751
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001752 seq_printf(m, "GMCH temp: %ld\n", temp);
1753 seq_printf(m, "Chipset power: %ld\n", chipset);
1754 seq_printf(m, "GFX power: %ld\n", gfx);
1755 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1756 }
Jesse Barnes7648fa92010-05-20 14:28:11 -07001757
1758 return 0;
1759}
1760
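/*
 * On LLC platforms, dump the PCODE table mapping each GPU frequency
 * step to the effective CPU and ring frequencies.
 */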
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001761static int i915_ring_freq_table(struct seq_file *m, void *unused)
1762{
David Weinehall36cdd012016-08-22 13:59:31 +03001763 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001764 struct intel_rps *rps = &dev_priv->gt_pm.rps;
Akash Goelf936ec32015-06-29 14:50:22 +05301765 unsigned int max_gpu_freq, min_gpu_freq;
Chris Wilsona0371212019-01-14 14:21:14 +00001766 intel_wakeref_t wakeref;
Chris Wilsond586b5f2018-03-08 14:26:48 +00001767 int gpu_freq, ia_freq;
1768 int ret;
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001769
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00001770 if (!HAS_LLC(dev_priv))
1771 return -ENODEV;
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001772
Chris Wilsona0371212019-01-14 14:21:14 +00001773 wakeref = intel_runtime_pm_get(dev_priv);
Paulo Zanoni5bfa0192013-12-19 11:54:52 -02001774
Sagar Arun Kamble9f817502017-10-10 22:30:05 +01001775 ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001776 if (ret)
Paulo Zanoni5bfa0192013-12-19 11:54:52 -02001777 goto out;
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001778
Chris Wilsond586b5f2018-03-08 14:26:48 +00001779 min_gpu_freq = rps->min_freq;
1780 max_gpu_freq = rps->max_freq;
Oscar Mateo2b2874e2018-04-05 17:00:52 +03001781 if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
Akash Goelf936ec32015-06-29 14:50:22 +05301782 /* Convert GT frequency to 50 MHz units */
Chris Wilsond586b5f2018-03-08 14:26:48 +00001783 min_gpu_freq /= GEN9_FREQ_SCALER;
1784 max_gpu_freq /= GEN9_FREQ_SCALER;
Akash Goelf936ec32015-06-29 14:50:22 +05301785 }
1786
Damien Lespiau267f0c92013-06-24 22:59:48 +01001787 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001788
Akash Goelf936ec32015-06-29 14:50:22 +05301789 for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
Ben Widawsky42c05262012-09-26 10:34:00 -07001790 ia_freq = gpu_freq;
1791 sandybridge_pcode_read(dev_priv,
1792 GEN6_PCODE_READ_MIN_FREQ_TABLE,
1793 &ia_freq);
Chris Wilson3ebecd02013-04-12 19:10:13 +01001794 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
Akash Goelf936ec32015-06-29 14:50:22 +05301795 intel_gpu_freq(dev_priv, (gpu_freq *
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001796 (IS_GEN9_BC(dev_priv) ||
Oscar Mateo2b2874e2018-04-05 17:00:52 +03001797 INTEL_GEN(dev_priv) >= 10 ?
Rodrigo Vivib976dc52017-01-23 10:32:37 -08001798 GEN9_FREQ_SCALER : 1))),
Chris Wilson3ebecd02013-04-12 19:10:13 +01001799 ((ia_freq >> 0) & 0xff) * 100,
1800 ((ia_freq >> 8) & 0xff) * 100);
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001801 }
1802
Sagar Arun Kamble9f817502017-10-10 22:30:05 +01001803 mutex_unlock(&dev_priv->pcu_lock);
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001804
Paulo Zanoni5bfa0192013-12-19 11:54:52 -02001805out:
Chris Wilsona0371212019-01-14 14:21:14 +00001806 intel_runtime_pm_put(dev_priv, wakeref);
Paulo Zanoni5bfa0192013-12-19 11:54:52 -02001807 return ret;
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001808}
1809
Chris Wilson44834a62010-08-19 16:09:23 +01001810static int i915_opregion(struct seq_file *m, void *unused)
1811{
David Weinehall36cdd012016-08-22 13:59:31 +03001812 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1813 struct drm_device *dev = &dev_priv->drm;
Chris Wilson44834a62010-08-19 16:09:23 +01001814 struct intel_opregion *opregion = &dev_priv->opregion;
1815 int ret;
1816
1817 ret = mutex_lock_interruptible(&dev->struct_mutex);
1818 if (ret)
Daniel Vetter0d38f002012-04-21 22:49:10 +02001819 goto out;
Chris Wilson44834a62010-08-19 16:09:23 +01001820
Jani Nikula2455a8e2015-12-14 12:50:53 +02001821 if (opregion->header)
1822 seq_write(m, opregion->header, OPREGION_SIZE);
Chris Wilson44834a62010-08-19 16:09:23 +01001823
1824 mutex_unlock(&dev->struct_mutex);
1825
Daniel Vetter0d38f002012-04-21 22:49:10 +02001826out:
Chris Wilson44834a62010-08-19 16:09:23 +01001827 return 0;
1828}
1829
Jani Nikulaada8f952015-12-15 13:17:12 +02001830static int i915_vbt(struct seq_file *m, void *unused)
1831{
David Weinehall36cdd012016-08-22 13:59:31 +03001832 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
Jani Nikulaada8f952015-12-15 13:17:12 +02001833
1834 if (opregion->vbt)
1835 seq_write(m, opregion->vbt, opregion->vbt_size);
1836
1837 return 0;
1838}
1839
Chris Wilson37811fc2010-08-25 22:45:57 +01001840static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1841{
David Weinehall36cdd012016-08-22 13:59:31 +03001842 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1843 struct drm_device *dev = &dev_priv->drm;
Namrta Salonieb13b8402015-11-27 13:43:11 +05301844 struct intel_framebuffer *fbdev_fb = NULL;
Daniel Vetter3a58ee12015-07-10 19:02:51 +02001845 struct drm_framebuffer *drm_fb;
Chris Wilson188c1ab2016-04-03 14:14:20 +01001846 int ret;
1847
1848 ret = mutex_lock_interruptible(&dev->struct_mutex);
1849 if (ret)
1850 return ret;
Chris Wilson37811fc2010-08-25 22:45:57 +01001851
Daniel Vetter06957262015-08-10 13:34:08 +02001852#ifdef CONFIG_DRM_FBDEV_EMULATION
Daniel Vetter346fb4e2017-07-06 15:00:20 +02001853 if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
David Weinehall36cdd012016-08-22 13:59:31 +03001854 fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
Chris Wilson37811fc2010-08-25 22:45:57 +01001855
Chris Wilson25bcce92016-07-02 15:36:00 +01001856 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1857 fbdev_fb->base.width,
1858 fbdev_fb->base.height,
Ville Syrjäläb00c6002016-12-14 23:31:35 +02001859 fbdev_fb->base.format->depth,
Ville Syrjälä272725c2016-12-14 23:32:20 +02001860 fbdev_fb->base.format->cpp[0] * 8,
Ville Syrjäläbae781b2016-11-16 13:33:16 +02001861 fbdev_fb->base.modifier,
Chris Wilson25bcce92016-07-02 15:36:00 +01001862 drm_framebuffer_read_refcount(&fbdev_fb->base));
Daniel Stonea5ff7a42018-05-18 15:30:07 +01001863 describe_obj(m, intel_fb_obj(&fbdev_fb->base));
Chris Wilson25bcce92016-07-02 15:36:00 +01001864 seq_putc(m, '\n');
1865 }
Daniel Vetter4520f532013-10-09 09:18:51 +02001866#endif
Chris Wilson37811fc2010-08-25 22:45:57 +01001867
Daniel Vetter4b096ac2012-12-10 21:19:18 +01001868 mutex_lock(&dev->mode_config.fb_lock);
Daniel Vetter3a58ee12015-07-10 19:02:51 +02001869 drm_for_each_fb(drm_fb, dev) {
Namrta Salonieb13b8402015-11-27 13:43:11 +05301870 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1871 if (fb == fbdev_fb)
Chris Wilson37811fc2010-08-25 22:45:57 +01001872 continue;
1873
Tvrtko Ursulinc1ca506d2015-02-10 17:16:07 +00001874 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
Chris Wilson37811fc2010-08-25 22:45:57 +01001875 fb->base.width,
1876 fb->base.height,
Ville Syrjäläb00c6002016-12-14 23:31:35 +02001877 fb->base.format->depth,
Ville Syrjälä272725c2016-12-14 23:32:20 +02001878 fb->base.format->cpp[0] * 8,
Ville Syrjäläbae781b2016-11-16 13:33:16 +02001879 fb->base.modifier,
Dave Airlie747a5982016-04-15 15:10:35 +10001880 drm_framebuffer_read_refcount(&fb->base));
Daniel Stonea5ff7a42018-05-18 15:30:07 +01001881 describe_obj(m, intel_fb_obj(&fb->base));
Damien Lespiau267f0c92013-06-24 22:59:48 +01001882 seq_putc(m, '\n');
Chris Wilson37811fc2010-08-25 22:45:57 +01001883 }
Daniel Vetter4b096ac2012-12-10 21:19:18 +01001884 mutex_unlock(&dev->mode_config.fb_lock);
Chris Wilson188c1ab2016-04-03 14:14:20 +01001885 mutex_unlock(&dev->struct_mutex);
Chris Wilson37811fc2010-08-25 22:45:57 +01001886
1887 return 0;
1888}
1889
Chris Wilson7e37f882016-08-02 22:50:21 +01001890static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001891{
Chris Wilsonef5032a2018-03-07 13:42:24 +00001892 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1893 ring->space, ring->head, ring->tail, ring->emit);
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001894}
1895
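/*
 * Walk the context list under struct_mutex, printing each context's HW
 * id, owning process and per-engine context state and ringbuffer.
 */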
Ben Widawskye76d3632011-03-19 18:14:29 -07001896static int i915_context_status(struct seq_file *m, void *unused)
1897{
David Weinehall36cdd012016-08-22 13:59:31 +03001898 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1899 struct drm_device *dev = &dev_priv->drm;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001900 struct intel_engine_cs *engine;
Chris Wilsone2efd132016-05-24 14:53:34 +01001901 struct i915_gem_context *ctx;
Akash Goel3b3f1652016-10-13 22:44:48 +05301902 enum intel_engine_id id;
Dave Gordonc3232b12016-03-23 18:19:53 +00001903 int ret;
Ben Widawskye76d3632011-03-19 18:14:29 -07001904
Daniel Vetterf3d28872014-05-29 23:23:08 +02001905 ret = mutex_lock_interruptible(&dev->struct_mutex);
Ben Widawskye76d3632011-03-19 18:14:29 -07001906 if (ret)
1907 return ret;
1908
Chris Wilson829a0af2017-06-20 12:05:45 +01001909 list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
Chris Wilson288f1ce2018-09-04 16:31:17 +01001910 seq_puts(m, "HW context ");
1911 if (!list_empty(&ctx->hw_id_link))
1912 seq_printf(m, "%x [pin %u]", ctx->hw_id,
1913 atomic_read(&ctx->hw_id_pin_count));
Chris Wilsonc84455b2016-08-15 10:49:08 +01001914 if (ctx->pid) {
Chris Wilsond28b99a2016-05-24 14:53:39 +01001915 struct task_struct *task;
1916
Chris Wilsonc84455b2016-08-15 10:49:08 +01001917 task = get_pid_task(ctx->pid, PIDTYPE_PID);
Chris Wilsond28b99a2016-05-24 14:53:39 +01001918 if (task) {
1919 seq_printf(m, "(%s [%d]) ",
1920 task->comm, task->pid);
1921 put_task_struct(task);
1922 }
Chris Wilsonc84455b2016-08-15 10:49:08 +01001923 } else if (IS_ERR(ctx->file_priv)) {
1924 seq_puts(m, "(deleted) ");
Chris Wilsond28b99a2016-05-24 14:53:39 +01001925 } else {
1926 seq_puts(m, "(kernel) ");
1927 }
1928
Chris Wilsonbca44d82016-05-24 14:53:41 +01001929 seq_putc(m, ctx->remap_slice ? 'R' : 'r');
1930 seq_putc(m, '\n');
Ben Widawskya33afea2013-09-17 21:12:45 -07001931
Akash Goel3b3f1652016-10-13 22:44:48 +05301932 for_each_engine(engine, dev_priv, id) {
Chris Wilsonab82a062018-04-30 14:15:01 +01001933 struct intel_context *ce =
1934 to_intel_context(ctx, engine);
Chris Wilsonbca44d82016-05-24 14:53:41 +01001935
1936 seq_printf(m, "%s: ", engine->name);
Chris Wilsonbca44d82016-05-24 14:53:41 +01001937 if (ce->state)
Chris Wilsonbf3783e2016-08-15 10:48:54 +01001938 describe_obj(m, ce->state->obj);
Chris Wilsondca33ec2016-08-02 22:50:20 +01001939 if (ce->ring)
Chris Wilson7e37f882016-08-02 22:50:21 +01001940 describe_ctx_ring(m, ce->ring);
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001941 seq_putc(m, '\n');
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001942 }
1943
Ben Widawskya33afea2013-09-17 21:12:45 -07001944 seq_putc(m, '\n');
Ben Widawskya168c292013-02-14 15:05:12 -08001945 }
1946
Daniel Vetterf3d28872014-05-29 23:23:08 +02001947 mutex_unlock(&dev->struct_mutex);
Ben Widawskye76d3632011-03-19 18:14:29 -07001948
1949 return 0;
1950}
1951
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001952static const char *swizzle_string(unsigned swizzle)
1953{
Damien Lespiauaee56cf2013-06-24 22:59:49 +01001954 switch (swizzle) {
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001955 case I915_BIT_6_SWIZZLE_NONE:
1956 return "none";
1957 case I915_BIT_6_SWIZZLE_9:
1958 return "bit9";
1959 case I915_BIT_6_SWIZZLE_9_10:
1960 return "bit9/bit10";
1961 case I915_BIT_6_SWIZZLE_9_11:
1962 return "bit9/bit11";
1963 case I915_BIT_6_SWIZZLE_9_10_11:
1964 return "bit9/bit10/bit11";
1965 case I915_BIT_6_SWIZZLE_9_17:
1966 return "bit9/bit17";
1967 case I915_BIT_6_SWIZZLE_9_10_17:
1968 return "bit9/bit10/bit17";
1969 case I915_BIT_6_SWIZZLE_UNKNOWN:
Masanari Iida8a168ca2012-12-29 02:00:09 +09001970 return "unknown";
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001971 }
1972
1973 return "bug";
1974}
1975
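/*
 * Report the bit6 swizzling mode used for X/Y tiling together with the
 * DRAM configuration registers it was derived from.
 */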
1976static int i915_swizzle_info(struct seq_file *m, void *data)
1977{
David Weinehall36cdd012016-08-22 13:59:31 +03001978 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00001979 intel_wakeref_t wakeref;
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001980
Chris Wilsona0371212019-01-14 14:21:14 +00001981 wakeref = intel_runtime_pm_get(dev_priv);
Daniel Vetter22bcfc62012-08-09 15:07:02 +02001982
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001983 seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
1984 swizzle_string(dev_priv->mm.bit_6_swizzle_x));
1985 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
1986 swizzle_string(dev_priv->mm.bit_6_swizzle_y));
1987
Lucas De Marchif3ce44a2018-12-12 10:10:44 -08001988 if (IS_GEN_RANGE(dev_priv, 3, 4)) {
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001989 seq_printf(m, "DDC = 0x%08x\n",
1990 I915_READ(DCC));
Daniel Vetter656bfa32014-11-20 09:26:30 +01001991 seq_printf(m, "DDC2 = 0x%08x\n",
1992 I915_READ(DCC2));
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001993 seq_printf(m, "C0DRB3 = 0x%04x\n",
1994 I915_READ16(C0DRB3));
1995 seq_printf(m, "C1DRB3 = 0x%04x\n",
1996 I915_READ16(C1DRB3));
David Weinehall36cdd012016-08-22 13:59:31 +03001997 } else if (INTEL_GEN(dev_priv) >= 6) {
Daniel Vetter3fa7d232012-01-31 16:47:56 +01001998 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
1999 I915_READ(MAD_DIMM_C0));
2000 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
2001 I915_READ(MAD_DIMM_C1));
2002 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
2003 I915_READ(MAD_DIMM_C2));
2004 seq_printf(m, "TILECTL = 0x%08x\n",
2005 I915_READ(TILECTL));
David Weinehall36cdd012016-08-22 13:59:31 +03002006 if (INTEL_GEN(dev_priv) >= 8)
Ben Widawsky9d3203e2013-11-02 21:07:14 -07002007 seq_printf(m, "GAMTARBMODE = 0x%08x\n",
2008 I915_READ(GAMTARBMODE));
2009 else
2010 seq_printf(m, "ARB_MODE = 0x%08x\n",
2011 I915_READ(ARB_MODE));
Daniel Vetter3fa7d232012-01-31 16:47:56 +01002012 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
2013 I915_READ(DISP_ARB_CTL));
Daniel Vetterea16a3c2011-12-14 13:57:16 +01002014 }
Daniel Vetter656bfa32014-11-20 09:26:30 +01002015
2016 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2017 seq_puts(m, "L-shaped memory detected\n");
2018
Chris Wilsona0371212019-01-14 14:21:14 +00002019 intel_runtime_pm_put(dev_priv, wakeref);
Daniel Vetterea16a3c2011-12-14 13:57:16 +01002020
2021 return 0;
2022}
2023
Chris Wilsonf5a4c672015-04-27 13:41:23 +01002024static int count_irq_waiters(struct drm_i915_private *i915)
2025{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002026 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05302027 enum intel_engine_id id;
Chris Wilsonf5a4c672015-04-27 13:41:23 +01002028 int count = 0;
Chris Wilsonf5a4c672015-04-27 13:41:23 +01002029
Akash Goel3b3f1652016-10-13 22:44:48 +05302030 for_each_engine(engine, i915, id)
Chris Wilson688e6c72016-07-01 17:23:15 +01002031 count += intel_engine_has_waiter(engine);
Chris Wilsonf5a4c672015-04-27 13:41:23 +01002032
2033 return count;
2034}
2035
Chris Wilson7466c292016-08-15 09:49:33 +01002036static const char *rps_power_to_str(unsigned int power)
2037{
2038 static const char * const strings[] = {
2039 [LOW_POWER] = "low power",
2040 [BETWEEN] = "mixed",
2041 [HIGH_POWER] = "high power",
2042 };
2043
2044 if (power >= ARRAY_SIZE(strings) || !strings[power])
2045 return "unknown";
2046
2047 return strings[power];
2048}
2049
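/*
 * RPS boost report: requested vs actual frequency, the soft/hard limits,
 * per-client boost counts and, while requests are in flight, the up/down
 * EI averages that drive the RPS autotuning window.
 */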
Chris Wilson1854d5c2015-04-07 16:20:32 +01002050static int i915_rps_boost_info(struct seq_file *m, void *data)
2051{
David Weinehall36cdd012016-08-22 13:59:31 +03002052 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2053 struct drm_device *dev = &dev_priv->drm;
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002054 struct intel_rps *rps = &dev_priv->gt_pm.rps;
Chris Wilsonc0a6aa72018-10-02 12:32:21 +01002055 u32 act_freq = rps->cur_freq;
Chris Wilsona0371212019-01-14 14:21:14 +00002056 intel_wakeref_t wakeref;
Chris Wilson1854d5c2015-04-07 16:20:32 +01002057 struct drm_file *file;
Chris Wilson1854d5c2015-04-07 16:20:32 +01002058
Chris Wilsond4225a52019-01-14 14:21:23 +00002059 with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
Chris Wilsonc0a6aa72018-10-02 12:32:21 +01002060 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
2061 mutex_lock(&dev_priv->pcu_lock);
2062 act_freq = vlv_punit_read(dev_priv,
2063 PUNIT_REG_GPU_FREQ_STS);
2064 act_freq = (act_freq >> 8) & 0xff;
2065 mutex_unlock(&dev_priv->pcu_lock);
2066 } else {
2067 act_freq = intel_get_cagf(dev_priv,
2068 I915_READ(GEN6_RPSTAT1));
2069 }
Chris Wilsonc0a6aa72018-10-02 12:32:21 +01002070 }
2071
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002072 seq_printf(m, "RPS enabled? %d\n", rps->enabled);
Chris Wilson28176ef2016-10-28 13:58:56 +01002073 seq_printf(m, "GPU busy? %s [%d requests]\n",
2074 yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
Chris Wilsonf5a4c672015-04-27 13:41:23 +01002075 seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
Chris Wilson7b92c1b2017-06-28 13:35:48 +01002076 seq_printf(m, "Boosts outstanding? %d\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002077 atomic_read(&rps->num_waiters));
Chris Wilson60548c52018-07-31 14:26:29 +01002078 seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
Chris Wilsonc0a6aa72018-10-02 12:32:21 +01002079 seq_printf(m, "Frequency requested %d, actual %d\n",
2080 intel_gpu_freq(dev_priv, rps->cur_freq),
2081 intel_gpu_freq(dev_priv, act_freq));
Chris Wilson7466c292016-08-15 09:49:33 +01002082 seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002083 intel_gpu_freq(dev_priv, rps->min_freq),
2084 intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
2085 intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
2086 intel_gpu_freq(dev_priv, rps->max_freq));
Chris Wilson7466c292016-08-15 09:49:33 +01002087 seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002088 intel_gpu_freq(dev_priv, rps->idle_freq),
2089 intel_gpu_freq(dev_priv, rps->efficient_freq),
2090 intel_gpu_freq(dev_priv, rps->boost_freq));
Daniel Vetter1d2ac402016-04-26 19:29:41 +02002091
2092 mutex_lock(&dev->filelist_mutex);
Chris Wilson1854d5c2015-04-07 16:20:32 +01002093 list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2094 struct drm_i915_file_private *file_priv = file->driver_priv;
2095 struct task_struct *task;
2096
2097 rcu_read_lock();
2098 task = pid_task(file->pid, PIDTYPE_PID);
Chris Wilson7b92c1b2017-06-28 13:35:48 +01002099 seq_printf(m, "%s [%d]: %d boosts\n",
Chris Wilson1854d5c2015-04-07 16:20:32 +01002100 task ? task->comm : "<unknown>",
2101 task ? task->pid : -1,
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002102 atomic_read(&file_priv->rps_client.boosts));
Chris Wilson1854d5c2015-04-07 16:20:32 +01002103 rcu_read_unlock();
2104 }
Chris Wilson7b92c1b2017-06-28 13:35:48 +01002105 seq_printf(m, "Kernel (anonymous) boosts: %d\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002106 atomic_read(&rps->boosts));
Daniel Vetter1d2ac402016-04-26 19:29:41 +02002107 mutex_unlock(&dev->filelist_mutex);
Chris Wilson1854d5c2015-04-07 16:20:32 +01002108
Chris Wilson7466c292016-08-15 09:49:33 +01002109 if (INTEL_GEN(dev_priv) >= 6 &&
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002110 rps->enabled &&
Chris Wilson28176ef2016-10-28 13:58:56 +01002111 dev_priv->gt.active_requests) {
Chris Wilson7466c292016-08-15 09:49:33 +01002112 u32 rpup, rpupei;
2113 u32 rpdown, rpdownei;
2114
2115 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2116 rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
2117 rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
2118 rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
2119 rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
2120 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2121
2122 seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
Chris Wilson60548c52018-07-31 14:26:29 +01002123 rps_power_to_str(rps->power.mode));
Chris Wilson7466c292016-08-15 09:49:33 +01002124 seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
Chris Wilson23f4a282017-02-18 11:27:08 +00002125 rpup && rpupei ? 100 * rpup / rpupei : 0,
Chris Wilson60548c52018-07-31 14:26:29 +01002126 rps->power.up_threshold);
Chris Wilson7466c292016-08-15 09:49:33 +01002127 seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
Chris Wilson23f4a282017-02-18 11:27:08 +00002128 rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
Chris Wilson60548c52018-07-31 14:26:29 +01002129 rps->power.down_threshold);
Chris Wilson7466c292016-08-15 09:49:33 +01002130 } else {
2131 seq_puts(m, "\nRPS Autotuning inactive\n");
2132 }
2133
Chris Wilson8d3afd72015-05-21 21:01:47 +01002134 return 0;
Chris Wilson1854d5c2015-04-07 16:20:32 +01002135}
2136
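/* Report LLC presence and the eLLC/eDRAM size. */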
Ben Widawsky63573eb2013-07-04 11:02:07 -07002137static int i915_llc(struct seq_file *m, void *data)
2138{
David Weinehall36cdd012016-08-22 13:59:31 +03002139 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Mika Kuoppala3accaf72016-04-13 17:26:43 +03002140 const bool edram = INTEL_GEN(dev_priv) > 8;
Ben Widawsky63573eb2013-07-04 11:02:07 -07002141
David Weinehall36cdd012016-08-22 13:59:31 +03002142 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
Mika Kuoppala3accaf72016-04-13 17:26:43 +03002143 seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2144 intel_uncore_edram_size(dev_priv)/1024/1024);
Ben Widawsky63573eb2013-07-04 11:02:07 -07002145
2146 return 0;
2147}
2148
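/* Dump HuC firmware load state and the HUC_STATUS2 register. */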
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002149static int i915_huc_load_status_info(struct seq_file *m, void *data)
2150{
2151 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00002152 intel_wakeref_t wakeref;
Michal Wajdeczko56ffc742017-10-17 09:44:49 +00002153 struct drm_printer p;
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002154
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002155 if (!HAS_HUC(dev_priv))
2156 return -ENODEV;
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002157
Michal Wajdeczko56ffc742017-10-17 09:44:49 +00002158 p = drm_seq_file_printer(m);
2159 intel_uc_fw_dump(&dev_priv->huc.fw, &p);
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002160
Chris Wilsond4225a52019-01-14 14:21:23 +00002161 with_intel_runtime_pm(dev_priv, wakeref)
2162 seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002163
2164 return 0;
2165}
2166
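/*
 * Dump GuC firmware load state, a decode of GUC_STATUS and the
 * SOFT_SCRATCH registers.
 */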
Alex Daifdf5d352015-08-12 15:43:37 +01002167static int i915_guc_load_status_info(struct seq_file *m, void *data)
2168{
David Weinehall36cdd012016-08-22 13:59:31 +03002169 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00002170 intel_wakeref_t wakeref;
Michal Wajdeczko56ffc742017-10-17 09:44:49 +00002171 struct drm_printer p;
Alex Daifdf5d352015-08-12 15:43:37 +01002172
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002173 if (!HAS_GUC(dev_priv))
2174 return -ENODEV;
Alex Daifdf5d352015-08-12 15:43:37 +01002175
Michal Wajdeczko56ffc742017-10-17 09:44:49 +00002176 p = drm_seq_file_printer(m);
2177 intel_uc_fw_dump(&dev_priv->guc.fw, &p);
Alex Daifdf5d352015-08-12 15:43:37 +01002178
Chris Wilsond4225a52019-01-14 14:21:23 +00002179 with_intel_runtime_pm(dev_priv, wakeref) {
2180 u32 tmp = I915_READ(GUC_STATUS);
2181 u32 i;
sagar.a.kamble@intel.com3582ad12017-02-03 13:58:33 +05302182
Chris Wilsond4225a52019-01-14 14:21:23 +00002183 seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2184 seq_printf(m, "\tBootrom status = 0x%x\n",
2185 (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2186 seq_printf(m, "\tuKernel status = 0x%x\n",
2187 (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2188 seq_printf(m, "\tMIA Core status = 0x%x\n",
2189 (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2190 seq_puts(m, "\nScratch registers:\n");
2191 for (i = 0; i < 16; i++) {
2192 seq_printf(m, "\t%2d: \t0x%x\n",
2193 i, I915_READ(SOFT_SCRATCH(i)));
2194 }
2195 }
sagar.a.kamble@intel.com3582ad12017-02-03 13:58:33 +05302196
Alex Daifdf5d352015-08-12 15:43:37 +01002197 return 0;
2198}
2199
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01002200static const char *
2201stringify_guc_log_type(enum guc_log_buffer_type type)
2202{
2203 switch (type) {
2204 case GUC_ISR_LOG_BUFFER:
2205 return "ISR";
2206 case GUC_DPC_LOG_BUFFER:
2207 return "DPC";
2208 case GUC_CRASH_DUMP_LOG_BUFFER:
2209 return "CRASH";
2210 default:
2211 MISSING_CASE(type);
2212 }
2213
2214 return "";
2215}
2216
Akash Goel5aa1ee42016-10-12 21:54:36 +05302217static void i915_guc_log_info(struct seq_file *m,
2218 struct drm_i915_private *dev_priv)
2219{
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01002220 struct intel_guc_log *log = &dev_priv->guc.log;
2221 enum guc_log_buffer_type type;
2222
2223 if (!intel_guc_log_relay_enabled(log)) {
2224 seq_puts(m, "GuC log relay disabled\n");
2225 return;
2226 }
Akash Goel5aa1ee42016-10-12 21:54:36 +05302227
Michał Winiarskidb557992018-03-19 10:53:43 +01002228 seq_puts(m, "GuC logging stats:\n");
Akash Goel5aa1ee42016-10-12 21:54:36 +05302229
Michał Winiarski6a96be22018-03-19 10:53:42 +01002230 seq_printf(m, "\tRelay full count: %u\n",
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01002231 log->relay.full_count);
2232
2233 for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
2234 seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
2235 stringify_guc_log_type(type),
2236 log->stats[type].flush,
2237 log->stats[type].sampled_overflow);
2238 }
Akash Goel5aa1ee42016-10-12 21:54:36 +05302239}
2240
Dave Gordon8b417c22015-08-12 15:43:44 +01002241static void i915_guc_client_info(struct seq_file *m,
2242 struct drm_i915_private *dev_priv,
Sagar Arun Kamble5afc8b42017-11-16 19:02:40 +05302243 struct intel_guc_client *client)
Dave Gordon8b417c22015-08-12 15:43:44 +01002244{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002245 struct intel_engine_cs *engine;
Dave Gordonc18468c2016-08-09 15:19:22 +01002246 enum intel_engine_id id;
Jani Nikulae5315212019-01-16 11:15:23 +02002247 u64 tot = 0;
Dave Gordon8b417c22015-08-12 15:43:44 +01002248
Oscar Mateob09935a2017-03-22 10:39:53 -07002249 seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2250 client->priority, client->stage_id, client->proc_desc_offset);
Michał Winiarski59db36c2017-09-14 12:51:23 +02002251 seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2252 client->doorbell_id, client->doorbell_offset);
Dave Gordon8b417c22015-08-12 15:43:44 +01002253
Akash Goel3b3f1652016-10-13 22:44:48 +05302254 for_each_engine(engine, dev_priv, id) {
Dave Gordonc18468c2016-08-09 15:19:22 +01002255 u64 submissions = client->submissions[id];
2256 tot += submissions;
Dave Gordon8b417c22015-08-12 15:43:44 +01002257 seq_printf(m, "\tSubmissions: %llu %s\n",
Dave Gordonc18468c2016-08-09 15:19:22 +01002258 submissions, engine->name);
Dave Gordon8b417c22015-08-12 15:43:44 +01002259 }
2260 seq_printf(m, "\tTotal: %llu\n", tot);
2261}
2262
2263static int i915_guc_info(struct seq_file *m, void *data)
2264{
David Weinehall36cdd012016-08-22 13:59:31 +03002265 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilson334636c2016-11-29 12:10:20 +00002266 const struct intel_guc *guc = &dev_priv->guc;
Dave Gordon8b417c22015-08-12 15:43:44 +01002267
Michał Winiarskidb557992018-03-19 10:53:43 +01002268 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002269 return -ENODEV;
2270
Michał Winiarskidb557992018-03-19 10:53:43 +01002271 i915_guc_log_info(m, dev_priv);
2272
2273 if (!USES_GUC_SUBMISSION(dev_priv))
2274 return 0;
2275
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002276 GEM_BUG_ON(!guc->execbuf_client);
Dave Gordon8b417c22015-08-12 15:43:44 +01002277
Michał Winiarskidb557992018-03-19 10:53:43 +01002278 seq_printf(m, "\nDoorbell map:\n");
Joonas Lahtinenabddffd2017-03-22 10:39:44 -07002279 seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
Michał Winiarskidb557992018-03-19 10:53:43 +01002280 seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
Dave Gordon9636f6d2016-06-13 17:57:28 +01002281
Chris Wilson334636c2016-11-29 12:10:20 +00002282 seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2283 i915_guc_client_info(m, dev_priv, guc->execbuf_client);
Chris Wilsone78c9172018-02-07 21:05:42 +00002284 if (guc->preempt_client) {
2285 seq_printf(m, "\nGuC preempt client @ %p:\n",
2286 guc->preempt_client);
2287 i915_guc_client_info(m, dev_priv, guc->preempt_client);
2288 }
Dave Gordon8b417c22015-08-12 15:43:44 +01002289
2290 /* Add more as required ... */
2291
2292 return 0;
2293}
2294
Oscar Mateoa8b93702017-05-10 15:04:51 +00002295static int i915_guc_stage_pool(struct seq_file *m, void *data)
Alex Dai4c7e77f2015-08-12 15:43:40 +01002296{
David Weinehall36cdd012016-08-22 13:59:31 +03002297 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Oscar Mateoa8b93702017-05-10 15:04:51 +00002298 const struct intel_guc *guc = &dev_priv->guc;
2299 struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
Sagar Arun Kamble5afc8b42017-11-16 19:02:40 +05302300 struct intel_guc_client *client = guc->execbuf_client;
Oscar Mateoa8b93702017-05-10 15:04:51 +00002301 unsigned int tmp;
2302 int index;
Alex Dai4c7e77f2015-08-12 15:43:40 +01002303
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002304 if (!USES_GUC_SUBMISSION(dev_priv))
2305 return -ENODEV;
Alex Dai4c7e77f2015-08-12 15:43:40 +01002306
Oscar Mateoa8b93702017-05-10 15:04:51 +00002307 for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
2308 struct intel_engine_cs *engine;
Alex Dai4c7e77f2015-08-12 15:43:40 +01002309
Oscar Mateoa8b93702017-05-10 15:04:51 +00002310 if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
2311 continue;
Alex Dai4c7e77f2015-08-12 15:43:40 +01002312
Oscar Mateoa8b93702017-05-10 15:04:51 +00002313 seq_printf(m, "GuC stage descriptor %u:\n", index);
2314 seq_printf(m, "\tIndex: %u\n", desc->stage_id);
2315 seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
2316 seq_printf(m, "\tPriority: %d\n", desc->priority);
2317 seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
2318 seq_printf(m, "\tEngines used: 0x%x\n",
2319 desc->engines_used);
2320 seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
2321 desc->db_trigger_phy,
2322 desc->db_trigger_cpu,
2323 desc->db_trigger_uk);
2324 seq_printf(m, "\tProcess descriptor: 0x%x\n",
2325 desc->process_desc);
Colin Ian King9a094852017-05-16 10:22:35 +01002326 seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
Oscar Mateoa8b93702017-05-10 15:04:51 +00002327 desc->wq_addr, desc->wq_size);
2328 seq_putc(m, '\n');
2329
2330 for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
2331 u32 guc_engine_id = engine->guc_id;
2332 struct guc_execlist_context *lrc =
2333 &desc->lrc[guc_engine_id];
2334
2335 seq_printf(m, "\t%s LRC:\n", engine->name);
2336 seq_printf(m, "\t\tContext desc: 0x%x\n",
2337 lrc->context_desc);
2338 seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
2339 seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
2340 seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
2341 seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
2342 seq_putc(m, '\n');
2343 }
Alex Dai4c7e77f2015-08-12 15:43:40 +01002344 }
2345
Oscar Mateoa8b93702017-05-10 15:04:51 +00002346 return 0;
2347}
2348
Alex Dai4c7e77f2015-08-12 15:43:40 +01002349static int i915_guc_log_dump(struct seq_file *m, void *data)
2350{
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002351 struct drm_info_node *node = m->private;
2352 struct drm_i915_private *dev_priv = node_to_i915(node);
2353 bool dump_load_err = !!node->info_ent->data;
2354 struct drm_i915_gem_object *obj = NULL;
2355 u32 *log;
2356 int i = 0;
Alex Dai4c7e77f2015-08-12 15:43:40 +01002357
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002358 if (!HAS_GUC(dev_priv))
2359 return -ENODEV;
2360
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002361 if (dump_load_err)
2362 obj = dev_priv->guc.load_err_log;
2363 else if (dev_priv->guc.log.vma)
2364 obj = dev_priv->guc.log.vma->obj;
2365
2366 if (!obj)
Alex Dai4c7e77f2015-08-12 15:43:40 +01002367 return 0;
2368
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002369 log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2370 if (IS_ERR(log)) {
2371 DRM_DEBUG("Failed to pin object\n");
2372		seq_puts(m, "(log data inaccessible)\n");
2373 return PTR_ERR(log);
Alex Dai4c7e77f2015-08-12 15:43:40 +01002374 }
2375
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002376 for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2377 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2378 *(log + i), *(log + i + 1),
2379 *(log + i + 2), *(log + i + 3));
2380
Alex Dai4c7e77f2015-08-12 15:43:40 +01002381 seq_putc(m, '\n');
2382
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002383 i915_gem_object_unpin_map(obj);
2384
Alex Dai4c7e77f2015-08-12 15:43:40 +01002385 return 0;
2386}
2387
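/*
 * Illustrative alternative, assuming the generic seq_file helper is
 * acceptable here: the word-by-word dump above could also be written
 * with seq_hex_dump(), at the cost of a slightly different layout:
 *
 *	seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 16, 4,
 *		     log, obj->base.size, false);
 */
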
Michał Winiarski4977a282018-03-19 10:53:40 +01002388static int i915_guc_log_level_get(void *data, u64 *val)
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302389{
Chris Wilsonbcc36d82017-04-07 20:42:20 +01002390 struct drm_i915_private *dev_priv = data;
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302391
Michał Winiarski86aa8242018-03-08 16:46:53 +01002392 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002393 return -ENODEV;
2394
Piotr Piórkowski50935ac2018-06-04 16:19:41 +02002395 *val = intel_guc_log_get_level(&dev_priv->guc.log);
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302396
2397 return 0;
2398}
2399
Michał Winiarski4977a282018-03-19 10:53:40 +01002400static int i915_guc_log_level_set(void *data, u64 val)
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302401{
Chris Wilsonbcc36d82017-04-07 20:42:20 +01002402 struct drm_i915_private *dev_priv = data;
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302403
Michał Winiarski86aa8242018-03-08 16:46:53 +01002404 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002405 return -ENODEV;
2406
Piotr Piórkowski50935ac2018-06-04 16:19:41 +02002407 return intel_guc_log_set_level(&dev_priv->guc.log, val);
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302408}
2409
Michał Winiarski4977a282018-03-19 10:53:40 +01002410DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
2411 i915_guc_log_level_get, i915_guc_log_level_set,
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302412 "%lld\n");
2413
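/*
 * Sketch of what DEFINE_SIMPLE_ATTRIBUTE() above generates (per
 * include/linux/fs.h, simplified; the _sketch names are illustrative
 * only): an open routine bound to the get/set pair plus the generic
 * simple_attr file operations.
 */
static int i915_guc_log_level_sketch_open(struct inode *inode,
					  struct file *file)
{
	return simple_attr_open(inode, file,
				i915_guc_log_level_get,
				i915_guc_log_level_set,
				"%lld\n");
}

static const struct file_operations i915_guc_log_level_sketch_fops = {
	.owner	 = THIS_MODULE,
	.open	 = i915_guc_log_level_sketch_open,
	.release = simple_attr_release,
	.read	 = simple_attr_read,
	.write	 = simple_attr_write,
	.llseek	 = generic_file_llseek,
};
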
Michał Winiarski4977a282018-03-19 10:53:40 +01002414static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2415{
2416 struct drm_i915_private *dev_priv = inode->i_private;
2417
2418 if (!USES_GUC(dev_priv))
2419 return -ENODEV;
2420
2421 file->private_data = &dev_priv->guc.log;
2422
2423 return intel_guc_log_relay_open(&dev_priv->guc.log);
2424}
2425
2426static ssize_t
2427i915_guc_log_relay_write(struct file *filp,
2428 const char __user *ubuf,
2429 size_t cnt,
2430 loff_t *ppos)
2431{
2432 struct intel_guc_log *log = filp->private_data;
2433
2434 intel_guc_log_relay_flush(log);
2435
2436 return cnt;
2437}
2438
2439static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2440{
2441 struct drm_i915_private *dev_priv = inode->i_private;
2442
2443 intel_guc_log_relay_close(&dev_priv->guc.log);
2444
2445 return 0;
2446}
2447
2448static const struct file_operations i915_guc_log_relay_fops = {
2449 .owner = THIS_MODULE,
2450 .open = i915_guc_log_relay_open,
2451 .write = i915_guc_log_relay_write,
2452 .release = i915_guc_log_relay_release,
2453};
2454
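/*
 * Illustrative only (the real hookup happens in this file's debugfs
 * registration table): a node with bespoke fops such as the relay
 * file above is created against the DRM minor's debugfs root roughly
 * like this:
 */
static int sketch_create_guc_log_relay(struct drm_minor *minor)
{
	struct dentry *ent;

	ent = debugfs_create_file("i915_guc_log_relay", 0600,
				  minor->debugfs_root,
				  to_i915(minor->dev),
				  &i915_guc_log_relay_fops);
	return ent ? 0 : -ENOMEM;
}
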
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002455static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2456{
2457 u8 val;
2458 static const char * const sink_status[] = {
2459 "inactive",
2460 "transition to active, capture and display",
2461 "active, display from RFB",
2462 "active, capture and display on sink device timings",
2463 "transition to inactive, capture and display, timing re-sync",
2464 "reserved",
2465 "reserved",
2466 "sink internal error",
2467 };
2468 struct drm_connector *connector = m->private;
Rodrigo Vivi7a72c782018-07-19 17:31:55 -07002469 struct drm_i915_private *dev_priv = to_i915(connector->dev);
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002470 struct intel_dp *intel_dp =
2471 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
Rodrigo Vivi7a72c782018-07-19 17:31:55 -07002472 int ret;
2473
2474 if (!CAN_PSR(dev_priv)) {
2475 seq_puts(m, "PSR Unsupported\n");
2476 return -ENODEV;
2477 }
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002478
2479 if (connector->status != connector_status_connected)
2480 return -ENODEV;
2481
Rodrigo Vivi7a72c782018-07-19 17:31:55 -07002482 ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2483
2484 if (ret == 1) {
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002485 const char *str = "unknown";
2486
2487 val &= DP_PSR_SINK_STATE_MASK;
2488 if (val < ARRAY_SIZE(sink_status))
2489 str = sink_status[val];
2490 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2491 } else {
Rodrigo Vivi7a72c782018-07-19 17:31:55 -07002492 return ret;
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002493 }
2494
2495 return 0;
2496}
2497DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2498
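/*
 * DEFINE_SHOW_ATTRIBUTE(name) is the read-only analogue of the
 * simple-attribute sketch earlier: per include/linux/seq_file.h it
 * emits name##_open() calling single_open(file, name##_show,
 * inode->i_private) plus an fops wired to seq_read, seq_lseek and
 * single_release.
 */
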
Vathsala Nagaraju00b06292018-06-27 13:38:30 +05302499static void
2500psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
Chris Wilsonb86bef202017-01-16 13:06:21 +00002501{
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002502 u32 val, status_val;
2503 const char *status = "unknown";
Chris Wilsonb86bef202017-01-16 13:06:21 +00002504
Vathsala Nagaraju00b06292018-06-27 13:38:30 +05302505 if (dev_priv->psr.psr2_enabled) {
2506 static const char * const live_status[] = {
2507 "IDLE",
2508 "CAPTURE",
2509 "CAPTURE_FS",
2510 "SLEEP",
2511 "BUFON_FW",
2512 "ML_UP",
2513 "SU_STANDBY",
2514 "FAST_SLEEP",
2515 "DEEP_SLEEP",
2516 "BUF_ON",
2517 "TG_ON"
2518 };
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002519 val = I915_READ(EDP_PSR2_STATUS);
2520 status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
2521 EDP_PSR2_STATUS_STATE_SHIFT;
2522 if (status_val < ARRAY_SIZE(live_status))
2523 status = live_status[status_val];
Vathsala Nagaraju00b06292018-06-27 13:38:30 +05302524 } else {
2525 static const char * const live_status[] = {
2526 "IDLE",
2527 "SRDONACK",
2528 "SRDENT",
2529 "BUFOFF",
2530 "BUFON",
2531 "AUXACK",
2532 "SRDOFFACK",
2533 "SRDENT_ON",
2534 };
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002535 val = I915_READ(EDP_PSR_STATUS);
2536 status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
2537 EDP_PSR_STATUS_STATE_SHIFT;
2538 if (status_val < ARRAY_SIZE(live_status))
2539 status = live_status[status_val];
Vathsala Nagaraju00b06292018-06-27 13:38:30 +05302540 }
Chris Wilsonb86bef202017-01-16 13:06:21 +00002541
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002542 seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
Chris Wilsonb86bef202017-01-16 13:06:21 +00002543}
2544
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03002545static int i915_edp_psr_status(struct seq_file *m, void *data)
2546{
David Weinehall36cdd012016-08-22 13:59:31 +03002547 struct drm_i915_private *dev_priv = node_to_i915(m->private);
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002548 struct i915_psr *psr = &dev_priv->psr;
Chris Wilsona0371212019-01-14 14:21:14 +00002549 intel_wakeref_t wakeref;
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002550 const char *status;
2551 bool enabled;
2552 u32 val;
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03002553
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002554 if (!HAS_PSR(dev_priv))
2555 return -ENODEV;
Damien Lespiau3553a8e2015-03-09 14:17:58 +00002556
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002557 seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
2558 if (psr->dp)
2559 seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
2560 seq_puts(m, "\n");
2561
2562 if (!psr->sink_support)
Dhinakaran Pandiyanc9ef2912018-01-03 13:38:24 -08002563 return 0;
2564
Chris Wilsona0371212019-01-14 14:21:14 +00002565 wakeref = intel_runtime_pm_get(dev_priv);
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002566 mutex_lock(&psr->lock);
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02002567
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002568 if (psr->enabled)
2569 status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
Dhinakaran Pandiyance3508f2018-05-11 16:00:59 -07002570 else
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002571 status = "disabled";
2572 seq_printf(m, "PSR mode: %s\n", status);
Rodrigo Vivi60e5ffe2016-02-01 12:02:07 -08002573
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002574 if (!psr->enabled)
2575 goto unlock;
Rodrigo Vivi60e5ffe2016-02-01 12:02:07 -08002576
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002577 if (psr->psr2_enabled) {
2578 val = I915_READ(EDP_PSR2_CTL);
2579 enabled = val & EDP_PSR2_ENABLE;
2580 } else {
2581 val = I915_READ(EDP_PSR_CTL);
2582 enabled = val & EDP_PSR_ENABLE;
2583 }
2584 seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
2585 enableddisabled(enabled), val);
2586 psr_source_status(dev_priv, m);
2587 seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
2588 psr->busy_frontbuffer_bits);
Rodrigo Vivia6cbdb82014-11-14 08:52:40 -08002589
Rodrigo Vivi05eec3c2015-11-23 14:16:40 -08002590 /*
Rodrigo Vivi05eec3c2015-11-23 14:16:40 -08002591	 * SKL+ Perf counter is reset to 0 every time DC state is entered
2592 */
David Weinehall36cdd012016-08-22 13:59:31 +03002593 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002594 val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
2595 seq_printf(m, "Performance counter: %u\n", val);
Rodrigo Vivia6cbdb82014-11-14 08:52:40 -08002596 }
Nagaraju, Vathsala6ba1f9e2017-01-06 22:02:32 +05302597
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002598 if (psr->debug & I915_PSR_DEBUG_IRQ) {
Dhinakaran Pandiyan3f983e542018-04-03 14:24:20 -07002599 seq_printf(m, "Last attempted entry at: %lld\n",
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002600 psr->last_entry_attempt);
2601 seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
Dhinakaran Pandiyan3f983e542018-04-03 14:24:20 -07002602 }
2603
José Roberto de Souzaa81f7812019-01-17 12:55:48 -08002604 if (psr->psr2_enabled) {
2605 u32 su_frames_val[3];
2606 int frame;
2607
2608 /*
2609		 * Reading all 3 registers beforehand to minimize crossing a
2610 * frame boundary between register reads
2611 */
2612 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
2613 su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));
2614
2615 seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
2616
2617 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
2618 u32 su_blocks;
2619
2620 su_blocks = su_frames_val[frame / 3] &
2621 PSR2_SU_STATUS_MASK(frame);
2622 su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
2623 seq_printf(m, "%d\t%d\n", frame, su_blocks);
2624 }
2625 }
2626
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002627unlock:
2628 mutex_unlock(&psr->lock);
Chris Wilsona0371212019-01-14 14:21:14 +00002629 intel_runtime_pm_put(dev_priv, wakeref);
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002630
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03002631 return 0;
2632}
2633
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002634static int
2635i915_edp_psr_debug_set(void *data, u64 val)
2636{
2637 struct drm_i915_private *dev_priv = data;
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002638 struct drm_modeset_acquire_ctx ctx;
Chris Wilsona0371212019-01-14 14:21:14 +00002639 intel_wakeref_t wakeref;
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002640 int ret;
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002641
2642 if (!CAN_PSR(dev_priv))
2643 return -ENODEV;
2644
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002645 DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002646
Chris Wilsona0371212019-01-14 14:21:14 +00002647 wakeref = intel_runtime_pm_get(dev_priv);
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002648
2649 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2650
2651retry:
2652 ret = intel_psr_set_debugfs_mode(dev_priv, &ctx, val);
2653 if (ret == -EDEADLK) {
2654 ret = drm_modeset_backoff(&ctx);
2655 if (!ret)
2656 goto retry;
2657 }
2658
2659 drm_modeset_drop_locks(&ctx);
2660 drm_modeset_acquire_fini(&ctx);
2661
Chris Wilsona0371212019-01-14 14:21:14 +00002662 intel_runtime_pm_put(dev_priv, wakeref);
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002663
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002664 return ret;
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002665}
2666
2667static int
2668i915_edp_psr_debug_get(void *data, u64 *val)
2669{
2670 struct drm_i915_private *dev_priv = data;
2671
2672 if (!CAN_PSR(dev_priv))
2673 return -ENODEV;
2674
2675 *val = READ_ONCE(dev_priv->psr.debug);
2676 return 0;
2677}
2678
2679DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
2680 i915_edp_psr_debug_get, i915_edp_psr_debug_set,
2681 "%llu\n");
2682
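/*
 * Usage sketch (hypothetical paths, assuming debugfs is mounted at
 * /sys/kernel/debug and the device is DRM minor 0):
 *
 *	cat /sys/kernel/debug/dri/0/i915_edp_psr_debug
 *	echo 0x1 > /sys/kernel/debug/dri/0/i915_edp_psr_debug
 *
 * The write side is parsed with base 0 by the simple attribute
 * machinery, so plain decimal and 0x-prefixed hex both work.
 */
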
Jesse Barnesec013e72013-08-20 10:29:23 +01002683static int i915_energy_uJ(struct seq_file *m, void *data)
2684{
David Weinehall36cdd012016-08-22 13:59:31 +03002685 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Gabriel Krisman Bertazid38014e2017-07-26 02:30:16 -03002686 unsigned long long power;
Chris Wilsona0371212019-01-14 14:21:14 +00002687 intel_wakeref_t wakeref;
Jesse Barnesec013e72013-08-20 10:29:23 +01002688 u32 units;
2689
David Weinehall36cdd012016-08-22 13:59:31 +03002690 if (INTEL_GEN(dev_priv) < 6)
Jesse Barnesec013e72013-08-20 10:29:23 +01002691 return -ENODEV;
2692
Chris Wilsond4225a52019-01-14 14:21:23 +00002693 if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
Gabriel Krisman Bertazid38014e2017-07-26 02:30:16 -03002694 return -ENODEV;
Gabriel Krisman Bertazid38014e2017-07-26 02:30:16 -03002695
2696 units = (power & 0x1f00) >> 8;
Chris Wilsond4225a52019-01-14 14:21:23 +00002697 with_intel_runtime_pm(dev_priv, wakeref)
2698 power = I915_READ(MCH_SECP_NRG_STTS);
2699
Gabriel Krisman Bertazid38014e2017-07-26 02:30:16 -03002700 power = (1000000 * power) >> units; /* convert to uJ */
Gabriel Krisman Bertazid38014e2017-07-26 02:30:16 -03002701 seq_printf(m, "%llu", power);
Paulo Zanoni371db662013-08-19 13:18:10 -03002702
2703 return 0;
2704}
2705
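/*
 * Worked example with hypothetical values: if the energy-status-unit
 * field (bits 12:8 of MSR_RAPL_POWER_UNIT) reads 14, one counter tick
 * is 1/2^14 J, so a raw MCH_SECP_NRG_STTS reading of 32768 converts
 * to (1000000 * 32768) >> 14 = 2000000 uJ, i.e. 2 J.
 */
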
Damien Lespiau6455c872015-06-04 18:23:57 +01002706static int i915_runtime_pm_status(struct seq_file *m, void *unused)
Paulo Zanoni371db662013-08-19 13:18:10 -03002707{
David Weinehall36cdd012016-08-22 13:59:31 +03002708 struct drm_i915_private *dev_priv = node_to_i915(m->private);
David Weinehall52a05c32016-08-22 13:32:44 +03002709 struct pci_dev *pdev = dev_priv->drm.pdev;
Paulo Zanoni371db662013-08-19 13:18:10 -03002710
Chris Wilsona156e642016-04-03 14:14:21 +01002711 if (!HAS_RUNTIME_PM(dev_priv))
2712 seq_puts(m, "Runtime power management not supported\n");
Paulo Zanoni371db662013-08-19 13:18:10 -03002713
Chris Wilson25c896bd2019-01-14 14:21:25 +00002714 seq_printf(m, "Runtime power status: %s\n",
2715 enableddisabled(!dev_priv->power_domains.wakeref));
2716
Chris Wilson6f561032018-01-24 11:36:07 +00002717 seq_printf(m, "GPU idle: %s (epoch %u)\n",
2718 yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
Paulo Zanoni371db662013-08-19 13:18:10 -03002719 seq_printf(m, "IRQs disabled: %s\n",
Jesse Barnes9df7575f2014-06-20 09:29:20 -07002720 yesno(!intel_irqs_enabled(dev_priv)));
Chris Wilson0d804182015-06-15 12:52:28 +01002721#ifdef CONFIG_PM
Damien Lespiaua6aaec82015-06-04 18:23:58 +01002722 seq_printf(m, "Usage count: %d\n",
David Weinehall36cdd012016-08-22 13:59:31 +03002723 atomic_read(&dev_priv->drm.dev->power.usage_count));
Chris Wilson0d804182015-06-15 12:52:28 +01002724#else
2725 seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2726#endif
Chris Wilsona156e642016-04-03 14:14:21 +01002727 seq_printf(m, "PCI device power state: %s [%d]\n",
David Weinehall52a05c32016-08-22 13:32:44 +03002728 pci_power_name(pdev->current_state),
2729 pdev->current_state);
Paulo Zanoni371db662013-08-19 13:18:10 -03002730
Chris Wilsonbd780f32019-01-14 14:21:09 +00002731 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
2732 struct drm_printer p = drm_seq_file_printer(m);
2733
2734 print_intel_runtime_pm_wakeref(dev_priv, &p);
2735 }
2736
Jesse Barnesec013e72013-08-20 10:29:23 +01002737 return 0;
2738}
2739
Imre Deak1da51582013-11-25 17:15:35 +02002740static int i915_power_domain_info(struct seq_file *m, void *unused)
2741{
David Weinehall36cdd012016-08-22 13:59:31 +03002742 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Imre Deak1da51582013-11-25 17:15:35 +02002743 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2744 int i;
2745
2746 mutex_lock(&power_domains->lock);
2747
2748 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2749 for (i = 0; i < power_domains->power_well_count; i++) {
2750 struct i915_power_well *power_well;
2751 enum intel_display_power_domain power_domain;
2752
2753 power_well = &power_domains->power_wells[i];
Imre Deakf28ec6f2018-08-06 12:58:37 +03002754 seq_printf(m, "%-25s %d\n", power_well->desc->name,
Imre Deak1da51582013-11-25 17:15:35 +02002755 power_well->count);
2756
Imre Deakf28ec6f2018-08-06 12:58:37 +03002757 for_each_power_domain(power_domain, power_well->desc->domains)
Imre Deak1da51582013-11-25 17:15:35 +02002758 seq_printf(m, " %-23s %d\n",
Daniel Stone9895ad02015-11-20 15:55:33 +00002759 intel_display_power_domain_str(power_domain),
Imre Deak1da51582013-11-25 17:15:35 +02002760 power_domains->domain_use_count[power_domain]);
Imre Deak1da51582013-11-25 17:15:35 +02002761 }
2762
2763 mutex_unlock(&power_domains->lock);
2764
2765 return 0;
2766}
2767
Damien Lespiaub7cec662015-10-27 14:47:01 +02002768static int i915_dmc_info(struct seq_file *m, void *unused)
2769{
David Weinehall36cdd012016-08-22 13:59:31 +03002770 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00002771 intel_wakeref_t wakeref;
Damien Lespiaub7cec662015-10-27 14:47:01 +02002772 struct intel_csr *csr;
2773
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002774 if (!HAS_CSR(dev_priv))
2775 return -ENODEV;
Damien Lespiaub7cec662015-10-27 14:47:01 +02002776
2777 csr = &dev_priv->csr;
2778
Chris Wilsona0371212019-01-14 14:21:14 +00002779 wakeref = intel_runtime_pm_get(dev_priv);
Mika Kuoppala6fb403d2015-10-30 17:54:47 +02002780
Damien Lespiaub7cec662015-10-27 14:47:01 +02002781 seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2782 seq_printf(m, "path: %s\n", csr->fw_path);
2783
2784 if (!csr->dmc_payload)
Mika Kuoppala6fb403d2015-10-30 17:54:47 +02002785 goto out;
Damien Lespiaub7cec662015-10-27 14:47:01 +02002786
2787 seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2788 CSR_VERSION_MINOR(csr->version));
2789
Imre Deak34b2f8d2018-10-31 22:02:20 +02002790 if (WARN_ON(INTEL_GEN(dev_priv) > 11))
2791 goto out;
2792
2793 seq_printf(m, "DC3 -> DC5 count: %d\n",
2794 I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
2795 SKL_CSR_DC3_DC5_COUNT));
2796 if (!IS_GEN9_LP(dev_priv))
Damien Lespiau83372062015-10-30 17:53:32 +02002797 seq_printf(m, "DC5 -> DC6 count: %d\n",
2798 I915_READ(SKL_CSR_DC5_DC6_COUNT));
Damien Lespiau83372062015-10-30 17:53:32 +02002799
Mika Kuoppala6fb403d2015-10-30 17:54:47 +02002800out:
2801 seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2802 seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2803 seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2804
Chris Wilsona0371212019-01-14 14:21:14 +00002805 intel_runtime_pm_put(dev_priv, wakeref);
Damien Lespiau83372062015-10-30 17:53:32 +02002806
Damien Lespiaub7cec662015-10-27 14:47:01 +02002807 return 0;
2808}
2809
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002810static void intel_seq_print_mode(struct seq_file *m, int tabs,
2811 struct drm_display_mode *mode)
2812{
2813 int i;
2814
2815 for (i = 0; i < tabs; i++)
2816 seq_putc(m, '\t');
2817
Shayenne Moura4fb6bb82018-12-20 10:27:57 -02002818 seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002819}
2820
2821static void intel_encoder_info(struct seq_file *m,
2822 struct intel_crtc *intel_crtc,
2823 struct intel_encoder *intel_encoder)
2824{
David Weinehall36cdd012016-08-22 13:59:31 +03002825 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2826 struct drm_device *dev = &dev_priv->drm;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002827 struct drm_crtc *crtc = &intel_crtc->base;
2828 struct intel_connector *intel_connector;
2829 struct drm_encoder *encoder;
2830
2831 encoder = &intel_encoder->base;
2832 seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
Jani Nikula8e329a032014-06-03 14:56:21 +03002833 encoder->base.id, encoder->name);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002834 for_each_connector_on_encoder(dev, encoder, intel_connector) {
2835 struct drm_connector *connector = &intel_connector->base;
2836 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2837 connector->base.id,
Jani Nikulac23cc412014-06-03 14:56:17 +03002838 connector->name,
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002839 drm_get_connector_status_name(connector->status));
2840 if (connector->status == connector_status_connected) {
2841 struct drm_display_mode *mode = &crtc->mode;
2842 seq_printf(m, ", mode:\n");
2843 intel_seq_print_mode(m, 2, mode);
2844 } else {
2845 seq_putc(m, '\n');
2846 }
2847 }
2848}
2849
2850static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2851{
David Weinehall36cdd012016-08-22 13:59:31 +03002852 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2853 struct drm_device *dev = &dev_priv->drm;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002854 struct drm_crtc *crtc = &intel_crtc->base;
2855 struct intel_encoder *intel_encoder;
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02002856 struct drm_plane_state *plane_state = crtc->primary->state;
2857 struct drm_framebuffer *fb = plane_state->fb;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002858
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02002859 if (fb)
Matt Roper5aa8a932014-06-16 10:12:55 -07002860 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02002861 fb->base.id, plane_state->src_x >> 16,
2862 plane_state->src_y >> 16, fb->width, fb->height);
Matt Roper5aa8a932014-06-16 10:12:55 -07002863 else
2864 seq_puts(m, "\tprimary plane disabled\n");
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002865 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2866 intel_encoder_info(m, intel_crtc, intel_encoder);
2867}
2868
2869static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2870{
2871 struct drm_display_mode *mode = panel->fixed_mode;
2872
2873 seq_printf(m, "\tfixed mode:\n");
2874 intel_seq_print_mode(m, 2, mode);
2875}
2876
2877static void intel_dp_info(struct seq_file *m,
2878 struct intel_connector *intel_connector)
2879{
2880 struct intel_encoder *intel_encoder = intel_connector->encoder;
2881 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2882
2883 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
Jani Nikula742f4912015-09-03 11:16:09 +03002884 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02002885 if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002886 intel_panel_info(m, &intel_connector->panel);
Mika Kahola80209e52016-09-09 14:10:57 +03002887
2888 drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2889 &intel_dp->aux);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002890}
2891
Libin Yang9a148a92016-11-28 20:07:05 +08002892static void intel_dp_mst_info(struct seq_file *m,
2893 struct intel_connector *intel_connector)
2894{
2895 struct intel_encoder *intel_encoder = intel_connector->encoder;
2896 struct intel_dp_mst_encoder *intel_mst =
2897 enc_to_mst(&intel_encoder->base);
2898 struct intel_digital_port *intel_dig_port = intel_mst->primary;
2899 struct intel_dp *intel_dp = &intel_dig_port->dp;
2900 bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2901 intel_connector->port);
2902
2903 seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2904}
2905
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002906static void intel_hdmi_info(struct seq_file *m,
2907 struct intel_connector *intel_connector)
2908{
2909 struct intel_encoder *intel_encoder = intel_connector->encoder;
2910 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2911
Jani Nikula742f4912015-09-03 11:16:09 +03002912 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002913}
2914
2915static void intel_lvds_info(struct seq_file *m,
2916 struct intel_connector *intel_connector)
2917{
2918 intel_panel_info(m, &intel_connector->panel);
2919}
2920
2921static void intel_connector_info(struct seq_file *m,
2922 struct drm_connector *connector)
2923{
2924 struct intel_connector *intel_connector = to_intel_connector(connector);
2925 struct intel_encoder *intel_encoder = intel_connector->encoder;
Jesse Barnesf103fc72014-02-20 12:39:57 -08002926 struct drm_display_mode *mode;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002927
2928 seq_printf(m, "connector %d: type %s, status: %s\n",
Jani Nikulac23cc412014-06-03 14:56:17 +03002929 connector->base.id, connector->name,
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002930 drm_get_connector_status_name(connector->status));
José Roberto de Souza3e037f92018-10-30 14:57:46 -07002931
2932 if (connector->status == connector_status_disconnected)
2933 return;
2934
2935 seq_printf(m, "\tname: %s\n", connector->display_info.name);
2936 seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2937 connector->display_info.width_mm,
2938 connector->display_info.height_mm);
2939 seq_printf(m, "\tsubpixel order: %s\n",
2940 drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2941 seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
Maarten Lankhorstee648a72016-06-21 12:00:38 +02002942
Maarten Lankhorst77d1f612017-06-26 10:33:49 +02002943 if (!intel_encoder)
Maarten Lankhorstee648a72016-06-21 12:00:38 +02002944 return;
2945
2946 switch (connector->connector_type) {
2947 case DRM_MODE_CONNECTOR_DisplayPort:
2948 case DRM_MODE_CONNECTOR_eDP:
Libin Yang9a148a92016-11-28 20:07:05 +08002949 if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
2950 intel_dp_mst_info(m, intel_connector);
2951 else
2952 intel_dp_info(m, intel_connector);
Maarten Lankhorstee648a72016-06-21 12:00:38 +02002953 break;
2954 case DRM_MODE_CONNECTOR_LVDS:
2955 if (intel_encoder->type == INTEL_OUTPUT_LVDS)
Dave Airlie36cd7442014-05-02 13:44:18 +10002956 intel_lvds_info(m, intel_connector);
Maarten Lankhorstee648a72016-06-21 12:00:38 +02002957 break;
2958 case DRM_MODE_CONNECTOR_HDMIA:
2959 if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
Ville Syrjälä7e732ca2017-10-27 22:31:24 +03002960 intel_encoder->type == INTEL_OUTPUT_DDI)
Maarten Lankhorstee648a72016-06-21 12:00:38 +02002961 intel_hdmi_info(m, intel_connector);
2962 break;
2963 default:
2964 break;
Dave Airlie36cd7442014-05-02 13:44:18 +10002965 }
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002966
Jesse Barnesf103fc72014-02-20 12:39:57 -08002967 seq_printf(m, "\tmodes:\n");
2968 list_for_each_entry(mode, &connector->modes, head)
2969 intel_seq_print_mode(m, 2, mode);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002970}
2971
Robert Fekete3abc4e02015-10-27 16:58:32 +01002972static const char *plane_type(enum drm_plane_type type)
2973{
2974 switch (type) {
2975 case DRM_PLANE_TYPE_OVERLAY:
2976 return "OVL";
2977 case DRM_PLANE_TYPE_PRIMARY:
2978 return "PRI";
2979 case DRM_PLANE_TYPE_CURSOR:
2980 return "CUR";
2981 /*
2982 * Deliberately omitting default: to generate compiler warnings
2983 * when a new drm_plane_type gets added.
2984 */
2985 }
2986
2987 return "unknown";
2988}
2989
Jani Nikula5852a152019-01-07 16:51:49 +02002990static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
Robert Fekete3abc4e02015-10-27 16:58:32 +01002991{
Robert Fekete3abc4e02015-10-27 16:58:32 +01002992 /*
Robert Fossc2c446a2017-05-19 16:50:17 -04002993	 * According to the docs only one DRM_MODE_ROTATE_ value is allowed,
Robert Fekete3abc4e02015-10-27 16:58:32 +01002994	 * but print them all so that misused combinations are visible
2995 */
Jani Nikula5852a152019-01-07 16:51:49 +02002996 snprintf(buf, bufsize,
Robert Fekete3abc4e02015-10-27 16:58:32 +01002997 "%s%s%s%s%s%s(0x%08x)",
Robert Fossc2c446a2017-05-19 16:50:17 -04002998 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
2999 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
3000 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
3001 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
3002 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
3003 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
Robert Fekete3abc4e02015-10-27 16:58:32 +01003004 rotation);
Robert Fekete3abc4e02015-10-27 16:58:32 +01003005}
3006
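/*
 * Example with a hypothetical state: rotation = DRM_MODE_ROTATE_90 |
 * DRM_MODE_REFLECT_X (0x2 | 0x10) formats as "90 FLIPX (0x00000012)";
 * a well-formed state sets exactly one DRM_MODE_ROTATE_ bit.
 */
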
3007static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3008{
David Weinehall36cdd012016-08-22 13:59:31 +03003009 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3010 struct drm_device *dev = &dev_priv->drm;
Robert Fekete3abc4e02015-10-27 16:58:32 +01003011 struct intel_plane *intel_plane;
3012
3013 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3014 struct drm_plane_state *state;
3015 struct drm_plane *plane = &intel_plane->base;
Eric Engestromb3c11ac2016-11-12 01:12:56 +00003016 struct drm_format_name_buf format_name;
Jani Nikula5852a152019-01-07 16:51:49 +02003017 char rot_str[48];
Robert Fekete3abc4e02015-10-27 16:58:32 +01003018
3019 if (!plane->state) {
3020 seq_puts(m, "plane->state is NULL!\n");
3021 continue;
3022 }
3023
3024 state = plane->state;
3025
Eric Engestrom90844f02016-08-15 01:02:38 +01003026 if (state->fb) {
Ville Syrjälä438b74a2016-12-14 23:32:55 +02003027 drm_get_format_name(state->fb->format->format,
3028 &format_name);
Eric Engestrom90844f02016-08-15 01:02:38 +01003029 } else {
Eric Engestromb3c11ac2016-11-12 01:12:56 +00003030 sprintf(format_name.str, "N/A");
Eric Engestrom90844f02016-08-15 01:02:38 +01003031 }
3032
Jani Nikula5852a152019-01-07 16:51:49 +02003033 plane_rotation(rot_str, sizeof(rot_str), state->rotation);
3034
Robert Fekete3abc4e02015-10-27 16:58:32 +01003035 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
3036 plane->base.id,
3037 plane_type(intel_plane->base.type),
3038 state->crtc_x, state->crtc_y,
3039 state->crtc_w, state->crtc_h,
3040 (state->src_x >> 16),
3041 ((state->src_x & 0xffff) * 15625) >> 10,
3042 (state->src_y >> 16),
3043 ((state->src_y & 0xffff) * 15625) >> 10,
3044 (state->src_w >> 16),
3045 ((state->src_w & 0xffff) * 15625) >> 10,
3046 (state->src_h >> 16),
3047 ((state->src_h & 0xffff) * 15625) >> 10,
Eric Engestromb3c11ac2016-11-12 01:12:56 +00003048 format_name.str,
Jani Nikula5852a152019-01-07 16:51:49 +02003049 rot_str);
Robert Fekete3abc4e02015-10-27 16:58:32 +01003050 }
3051}
3052
3053static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3054{
3055 struct intel_crtc_state *pipe_config;
3056 int num_scalers = intel_crtc->num_scalers;
3057 int i;
3058
3059 pipe_config = to_intel_crtc_state(intel_crtc->base.state);
3060
3061	/* Not all platforms have a scaler */
3062 if (num_scalers) {
3063 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
3064 num_scalers,
3065 pipe_config->scaler_state.scaler_users,
3066 pipe_config->scaler_state.scaler_id);
3067
A.Sunil Kamath58415912016-11-20 23:20:26 +05303068 for (i = 0; i < num_scalers; i++) {
Robert Fekete3abc4e02015-10-27 16:58:32 +01003069 struct intel_scaler *sc =
3070 &pipe_config->scaler_state.scalers[i];
3071
3072 seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
3073 i, yesno(sc->in_use), sc->mode);
3074 }
3075 seq_puts(m, "\n");
3076 } else {
3077 seq_puts(m, "\tNo scalers available on this platform\n");
3078 }
3079}
3080
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003081static int i915_display_info(struct seq_file *m, void *unused)
3082{
David Weinehall36cdd012016-08-22 13:59:31 +03003083 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3084 struct drm_device *dev = &dev_priv->drm;
Chris Wilson065f2ec22014-03-12 09:13:13 +00003085 struct intel_crtc *crtc;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003086 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003087 struct drm_connector_list_iter conn_iter;
Chris Wilsona0371212019-01-14 14:21:14 +00003088 intel_wakeref_t wakeref;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003089
Chris Wilsona0371212019-01-14 14:21:14 +00003090 wakeref = intel_runtime_pm_get(dev_priv);
3091
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003092 seq_printf(m, "CRTC info\n");
3093 seq_printf(m, "---------\n");
Damien Lespiaud3fcc802014-05-13 23:32:22 +01003094 for_each_intel_crtc(dev, crtc) {
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003095 struct intel_crtc_state *pipe_config;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003096
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003097 drm_modeset_lock(&crtc->base.mutex, NULL);
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003098 pipe_config = to_intel_crtc_state(crtc->base.state);
3099
Robert Fekete3abc4e02015-10-27 16:58:32 +01003100 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
Chris Wilson065f2ec22014-03-12 09:13:13 +00003101 crtc->base.base.id, pipe_name(crtc->pipe),
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003102 yesno(pipe_config->base.active),
Robert Fekete3abc4e02015-10-27 16:58:32 +01003103 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3104 yesno(pipe_config->dither), pipe_config->pipe_bpp);
3105
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003106 if (pipe_config->base.active) {
Ville Syrjäläcd5dcbf2017-03-27 21:55:35 +03003107 struct intel_plane *cursor =
3108 to_intel_plane(crtc->base.cursor);
3109
Chris Wilson065f2ec22014-03-12 09:13:13 +00003110 intel_crtc_info(m, crtc);
3111
Ville Syrjäläcd5dcbf2017-03-27 21:55:35 +03003112 seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
3113 yesno(cursor->base.state->visible),
3114 cursor->base.state->crtc_x,
3115 cursor->base.state->crtc_y,
3116 cursor->base.state->crtc_w,
3117 cursor->base.state->crtc_h,
3118 cursor->cursor.base);
Robert Fekete3abc4e02015-10-27 16:58:32 +01003119 intel_scaler_info(m, crtc);
3120 intel_plane_info(m, crtc);
Paulo Zanonia23dc652014-04-01 14:55:11 -03003121 }
Daniel Vettercace8412014-05-22 17:56:31 +02003122
3123 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
3124 yesno(!crtc->cpu_fifo_underrun_disabled),
3125 yesno(!crtc->pch_fifo_underrun_disabled));
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003126 drm_modeset_unlock(&crtc->base.mutex);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003127 }
3128
3129 seq_printf(m, "\n");
3130 seq_printf(m, "Connector info\n");
3131 seq_printf(m, "--------------\n");
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003132 mutex_lock(&dev->mode_config.mutex);
3133 drm_connector_list_iter_begin(dev, &conn_iter);
3134 drm_for_each_connector_iter(connector, &conn_iter)
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003135 intel_connector_info(m, connector);
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003136 drm_connector_list_iter_end(&conn_iter);
3137 mutex_unlock(&dev->mode_config.mutex);
3138
Chris Wilsona0371212019-01-14 14:21:14 +00003139 intel_runtime_pm_put(dev_priv, wakeref);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003140
3141 return 0;
3142}
3143
Chris Wilson1b365952016-10-04 21:11:31 +01003144static int i915_engine_info(struct seq_file *m, void *unused)
3145{
3146 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3147 struct intel_engine_cs *engine;
Chris Wilsona0371212019-01-14 14:21:14 +00003148 intel_wakeref_t wakeref;
Akash Goel3b3f1652016-10-13 22:44:48 +05303149 enum intel_engine_id id;
Chris Wilsonf636edb2017-10-09 12:02:57 +01003150 struct drm_printer p;
Chris Wilson1b365952016-10-04 21:11:31 +01003151
Chris Wilsona0371212019-01-14 14:21:14 +00003152 wakeref = intel_runtime_pm_get(dev_priv);
Chris Wilson9c870d02016-10-24 13:42:15 +01003153
Chris Wilson6f561032018-01-24 11:36:07 +00003154 seq_printf(m, "GT awake? %s (epoch %u)\n",
3155 yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
Chris Wilsonf73b5672017-03-02 15:03:56 +00003156 seq_printf(m, "Global active requests: %d\n",
3157 dev_priv->gt.active_requests);
Lionel Landwerlinf577a032017-11-13 23:34:53 +00003158 seq_printf(m, "CS timestamp frequency: %u kHz\n",
Jani Nikula02584042018-12-31 16:56:41 +02003159 RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
Chris Wilsonf73b5672017-03-02 15:03:56 +00003160
Chris Wilsonf636edb2017-10-09 12:02:57 +01003161 p = drm_seq_file_printer(m);
3162 for_each_engine(engine, dev_priv, id)
Chris Wilson0db18b12017-12-08 01:23:00 +00003163 intel_engine_dump(engine, &p, "%s\n", engine->name);
Chris Wilson1b365952016-10-04 21:11:31 +01003164
Chris Wilsona0371212019-01-14 14:21:14 +00003165 intel_runtime_pm_put(dev_priv, wakeref);
Chris Wilson9c870d02016-10-24 13:42:15 +01003166
Chris Wilson1b365952016-10-04 21:11:31 +01003167 return 0;
3168}
3169
Lionel Landwerlin79e9cd52018-03-06 12:28:54 +00003170static int i915_rcs_topology(struct seq_file *m, void *unused)
3171{
3172 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3173 struct drm_printer p = drm_seq_file_printer(m);
3174
Jani Nikula02584042018-12-31 16:56:41 +02003175 intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
Lionel Landwerlin79e9cd52018-03-06 12:28:54 +00003176
3177 return 0;
3178}
3179
Chris Wilsonc5418a82017-10-13 21:26:19 +01003180static int i915_shrinker_info(struct seq_file *m, void *unused)
3181{
3182 struct drm_i915_private *i915 = node_to_i915(m->private);
3183
3184 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3185 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3186
3187 return 0;
3188}
3189
Daniel Vetter728e29d2014-06-25 22:01:53 +03003190static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3191{
David Weinehall36cdd012016-08-22 13:59:31 +03003192 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3193 struct drm_device *dev = &dev_priv->drm;
Daniel Vetter728e29d2014-06-25 22:01:53 +03003194 int i;
3195
3196 drm_modeset_lock_all(dev);
3197 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3198 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
3199
Lucas De Marchi72f775f2018-03-20 15:06:34 -07003200 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
Lucas De Marchi0823eb92018-03-20 15:06:35 -07003201 pll->info->id);
Maarten Lankhorst2dd66ebd2016-03-14 09:27:52 +01003202 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
Ander Conselvan de Oliveira2c42e532016-12-29 17:22:09 +02003203 pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
Daniel Vetter728e29d2014-06-25 22:01:53 +03003204 seq_printf(m, " tracked hardware state:\n");
Ander Conselvan de Oliveira2c42e532016-12-29 17:22:09 +02003205 seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
Ander Conselvan de Oliveira3e369b72014-10-29 11:32:32 +02003206 seq_printf(m, " dpll_md: 0x%08x\n",
Ander Conselvan de Oliveira2c42e532016-12-29 17:22:09 +02003207 pll->state.hw_state.dpll_md);
3208 seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
3209 seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
3210 seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
Paulo Zanonic27e9172018-04-27 16:14:36 -07003211 seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
3212 seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
3213 seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
3214 pll->state.hw_state.mg_refclkin_ctl);
3215 seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
3216 pll->state.hw_state.mg_clktop2_coreclkctl1);
3217 seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
3218 pll->state.hw_state.mg_clktop2_hsclkctl);
3219 seq_printf(m, " mg_pll_div0: 0x%08x\n",
3220 pll->state.hw_state.mg_pll_div0);
3221 seq_printf(m, " mg_pll_div1: 0x%08x\n",
3222 pll->state.hw_state.mg_pll_div1);
3223 seq_printf(m, " mg_pll_lf: 0x%08x\n",
3224 pll->state.hw_state.mg_pll_lf);
3225 seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
3226 pll->state.hw_state.mg_pll_frac_lock);
3227 seq_printf(m, " mg_pll_ssc: 0x%08x\n",
3228 pll->state.hw_state.mg_pll_ssc);
3229 seq_printf(m, " mg_pll_bias: 0x%08x\n",
3230 pll->state.hw_state.mg_pll_bias);
3231 seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
3232 pll->state.hw_state.mg_pll_tdc_coldst_bias);
Daniel Vetter728e29d2014-06-25 22:01:53 +03003233 }
3234 drm_modeset_unlock_all(dev);
3235
3236 return 0;
3237}
3238
Damien Lespiau1ed1ef92014-08-30 16:50:59 +01003239static int i915_wa_registers(struct seq_file *m, void *unused)
Arun Siluvery888b5992014-08-26 14:44:51 +01003240{
Tvrtko Ursulin452420d2018-12-03 13:33:57 +00003241 struct drm_i915_private *i915 = node_to_i915(m->private);
3242 const struct i915_wa_list *wal = &i915->engine[RCS]->ctx_wa_list;
3243 struct i915_wa *wa;
3244 unsigned int i;
Arun Siluvery888b5992014-08-26 14:44:51 +01003245
Tvrtko Ursulin452420d2018-12-03 13:33:57 +00003246 seq_printf(m, "Workarounds applied: %u\n", wal->count);
3247 for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
Chris Wilson548764b2018-06-15 13:02:07 +01003248 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
Tvrtko Ursulin452420d2018-12-03 13:33:57 +00003249 i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);
Arun Siluvery888b5992014-08-26 14:44:51 +01003250
3251 return 0;
3252}
3253
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05303254static int i915_ipc_status_show(struct seq_file *m, void *data)
3255{
3256 struct drm_i915_private *dev_priv = m->private;
3257
3258 seq_printf(m, "Isochronous Priority Control: %s\n",
3259 yesno(dev_priv->ipc_enabled));
3260 return 0;
3261}
3262
3263static int i915_ipc_status_open(struct inode *inode, struct file *file)
3264{
3265 struct drm_i915_private *dev_priv = inode->i_private;
3266
3267 if (!HAS_IPC(dev_priv))
3268 return -ENODEV;
3269
3270 return single_open(file, i915_ipc_status_show, dev_priv);
3271}
3272
3273static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
3274 size_t len, loff_t *offp)
3275{
3276 struct seq_file *m = file->private_data;
3277 struct drm_i915_private *dev_priv = m->private;
Chris Wilsona0371212019-01-14 14:21:14 +00003278 intel_wakeref_t wakeref;
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05303279 bool enable;
Chris Wilsond4225a52019-01-14 14:21:23 +00003280 int ret;
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05303281
3282 ret = kstrtobool_from_user(ubuf, len, &enable);
3283 if (ret < 0)
3284 return ret;
3285
Chris Wilsond4225a52019-01-14 14:21:23 +00003286 with_intel_runtime_pm(dev_priv, wakeref) {
3287 if (!dev_priv->ipc_enabled && enable)
3288 DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
3289 dev_priv->wm.distrust_bios_wm = true;
3290 dev_priv->ipc_enabled = enable;
3291 intel_enable_ipc(dev_priv);
3292 }
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05303293
3294 return len;
3295}
3296
3297static const struct file_operations i915_ipc_status_fops = {
3298 .owner = THIS_MODULE,
3299 .open = i915_ipc_status_open,
3300 .read = seq_read,
3301 .llseek = seq_lseek,
3302 .release = single_release,
3303 .write = i915_ipc_status_write
3304};
3305
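/*
 * Note on the write path above: kstrtobool_from_user() accepts the
 * usual boolean spellings ("0"/"1", "y"/"n", "on"/"off"), so e.g.
 * (hypothetical path):
 *
 *	echo on > /sys/kernel/debug/dri/0/i915_ipc_status
 */
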
Damien Lespiauc5511e42014-11-04 17:06:51 +00003306static int i915_ddb_info(struct seq_file *m, void *unused)
3307{
David Weinehall36cdd012016-08-22 13:59:31 +03003308 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3309 struct drm_device *dev = &dev_priv->drm;
Damien Lespiauc5511e42014-11-04 17:06:51 +00003310 struct skl_ddb_entry *entry;
Ville Syrjäläff43bc32018-11-27 18:59:00 +02003311 struct intel_crtc *crtc;
Damien Lespiauc5511e42014-11-04 17:06:51 +00003312
David Weinehall36cdd012016-08-22 13:59:31 +03003313 if (INTEL_GEN(dev_priv) < 9)
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00003314 return -ENODEV;
Damien Lespiau2fcffe12014-12-03 17:33:24 +00003315
Damien Lespiauc5511e42014-11-04 17:06:51 +00003316 drm_modeset_lock_all(dev);
3317
Damien Lespiauc5511e42014-11-04 17:06:51 +00003318 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3319
Ville Syrjäläff43bc32018-11-27 18:59:00 +02003320 for_each_intel_crtc(&dev_priv->drm, crtc) {
3321 struct intel_crtc_state *crtc_state =
3322 to_intel_crtc_state(crtc->base.state);
3323 enum pipe pipe = crtc->pipe;
3324 enum plane_id plane_id;
3325
Damien Lespiauc5511e42014-11-04 17:06:51 +00003326 seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3327
Ville Syrjäläff43bc32018-11-27 18:59:00 +02003328 for_each_plane_id_on_crtc(crtc, plane_id) {
3329 entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
3330 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
Damien Lespiauc5511e42014-11-04 17:06:51 +00003331 entry->start, entry->end,
3332 skl_ddb_entry_size(entry));
3333 }
3334
Ville Syrjäläff43bc32018-11-27 18:59:00 +02003335 entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
Damien Lespiauc5511e42014-11-04 17:06:51 +00003336 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
3337 entry->end, skl_ddb_entry_size(entry));
3338 }
3339
3340 drm_modeset_unlock_all(dev);
3341
3342 return 0;
3343}
3344
Vandana Kannana54746e2015-03-03 20:53:10 +05303345static void drrs_status_per_crtc(struct seq_file *m,
David Weinehall36cdd012016-08-22 13:59:31 +03003346 struct drm_device *dev,
3347 struct intel_crtc *intel_crtc)
Vandana Kannana54746e2015-03-03 20:53:10 +05303348{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003349 struct drm_i915_private *dev_priv = to_i915(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303350 struct i915_drrs *drrs = &dev_priv->drrs;
3351 int vrefresh = 0;
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003352 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003353 struct drm_connector_list_iter conn_iter;
Vandana Kannana54746e2015-03-03 20:53:10 +05303354
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003355 drm_connector_list_iter_begin(dev, &conn_iter);
3356 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003357 if (connector->state->crtc != &intel_crtc->base)
3358 continue;
3359
3360 seq_printf(m, "%s:\n", connector->name);
Vandana Kannana54746e2015-03-03 20:53:10 +05303361 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003362 drm_connector_list_iter_end(&conn_iter);
Vandana Kannana54746e2015-03-03 20:53:10 +05303363
3364 if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
3365 seq_puts(m, "\tVBT: DRRS_type: Static");
3366 else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
3367 seq_puts(m, "\tVBT: DRRS_type: Seamless");
3368 else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
3369 seq_puts(m, "\tVBT: DRRS_type: None");
3370 else
3371 seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
3372
3373 seq_puts(m, "\n\n");
3374
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003375 if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
Vandana Kannana54746e2015-03-03 20:53:10 +05303376 struct intel_panel *panel;
3377
3378 mutex_lock(&drrs->mutex);
3379 /* DRRS Supported */
3380 seq_puts(m, "\tDRRS Supported: Yes\n");
3381
3382 /* disable_drrs() will make drrs->dp NULL */
3383 if (!drrs->dp) {
C, Ramalingamce6e2132017-11-20 09:53:47 +05303384 seq_puts(m, "Idleness DRRS: Disabled\n");
3385 if (dev_priv->psr.enabled)
3386 seq_puts(m,
3387 "\tAs PSR is enabled, DRRS is not enabled\n");
Vandana Kannana54746e2015-03-03 20:53:10 +05303388 mutex_unlock(&drrs->mutex);
3389 return;
3390 }
3391
3392 panel = &drrs->dp->attached_connector->panel;
3393 seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
3394 drrs->busy_frontbuffer_bits);
3395
3396 seq_puts(m, "\n\t\t");
3397 if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
3398 seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
3399 vrefresh = panel->fixed_mode->vrefresh;
3400 } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
3401 seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
3402 vrefresh = panel->downclock_mode->vrefresh;
3403 } else {
3404 seq_printf(m, "DRRS_State: Unknown(%d)\n",
3405 drrs->refresh_rate_type);
3406 mutex_unlock(&drrs->mutex);
3407 return;
3408 }
3409 seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3410
3411 seq_puts(m, "\n\t\t");
3412 mutex_unlock(&drrs->mutex);
3413 } else {
3414		/* DRRS not supported. Print the VBT parameter */
3415		seq_puts(m, "\tDRRS Supported: No");
3416 }
3417 seq_puts(m, "\n");
3418}
3419
3420static int i915_drrs_status(struct seq_file *m, void *unused)
3421{
David Weinehall36cdd012016-08-22 13:59:31 +03003422 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3423 struct drm_device *dev = &dev_priv->drm;
Vandana Kannana54746e2015-03-03 20:53:10 +05303424 struct intel_crtc *intel_crtc;
3425 int active_crtc_cnt = 0;
3426
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003427 drm_modeset_lock_all(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303428 for_each_intel_crtc(dev, intel_crtc) {
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003429 if (intel_crtc->base.state->active) {
Vandana Kannana54746e2015-03-03 20:53:10 +05303430 active_crtc_cnt++;
3431 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
3432
3433 drrs_status_per_crtc(m, dev, intel_crtc);
3434 }
Vandana Kannana54746e2015-03-03 20:53:10 +05303435 }
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003436 drm_modeset_unlock_all(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303437
3438 if (!active_crtc_cnt)
3439 seq_puts(m, "No active crtc found\n");
3440
3441 return 0;
3442}
3443
Dave Airlie11bed952014-05-12 15:22:27 +10003444static int i915_dp_mst_info(struct seq_file *m, void *unused)
3445{
David Weinehall36cdd012016-08-22 13:59:31 +03003446 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3447 struct drm_device *dev = &dev_priv->drm;
Dave Airlie11bed952014-05-12 15:22:27 +10003448 struct intel_encoder *intel_encoder;
3449 struct intel_digital_port *intel_dig_port;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003450 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003451 struct drm_connector_list_iter conn_iter;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003452
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003453 drm_connector_list_iter_begin(dev, &conn_iter);
3454 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003455 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
Dave Airlie11bed952014-05-12 15:22:27 +10003456 continue;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003457
3458 intel_encoder = intel_attached_encoder(connector);
3459 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3460 continue;
3461
3462 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
Dave Airlie11bed952014-05-12 15:22:27 +10003463 if (!intel_dig_port->dp.can_mst)
3464 continue;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003465
Jim Bride40ae80c2016-04-14 10:18:37 -07003466 seq_printf(m, "MST Source Port %c\n",
Ville Syrjälä8f4f2792017-11-09 17:24:34 +02003467 port_name(intel_dig_port->base.port));
Dave Airlie11bed952014-05-12 15:22:27 +10003468 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3469 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003470 drm_connector_list_iter_end(&conn_iter);
3471
Dave Airlie11bed952014-05-12 15:22:27 +10003472 return 0;
3473}
3474
Todd Previteeb3394fa2015-04-18 00:04:19 -07003475static ssize_t i915_displayport_test_active_write(struct file *file,
David Weinehall36cdd012016-08-22 13:59:31 +03003476 const char __user *ubuf,
3477 size_t len, loff_t *offp)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003478{
3479 char *input_buffer;
3480 int status = 0;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003481 struct drm_device *dev;
3482 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003483 struct drm_connector_list_iter conn_iter;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003484 struct intel_dp *intel_dp;
3485 int val = 0;
3486
Sudip Mukherjee9aaffa32015-07-21 17:36:45 +05303487 dev = ((struct seq_file *)file->private_data)->private;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003488
Todd Previteeb3394fa2015-04-18 00:04:19 -07003489 if (len == 0)
3490 return 0;
3491
Geliang Tang261aeba2017-05-06 23:40:17 +08003492 input_buffer = memdup_user_nul(ubuf, len);
3493 if (IS_ERR(input_buffer))
3494 return PTR_ERR(input_buffer);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003495
Todd Previteeb3394fa2015-04-18 00:04:19 -07003496 DRM_DEBUG_DRIVER("Copied %zu bytes from user\n", len);
3497
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003498 drm_connector_list_iter_begin(dev, &conn_iter);
3499 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003500 struct intel_encoder *encoder;
3501
Todd Previteeb3394fa2015-04-18 00:04:19 -07003502 if (connector->connector_type !=
3503 DRM_MODE_CONNECTOR_DisplayPort)
3504 continue;
3505
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003506 encoder = to_intel_encoder(connector->encoder);
3507 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3508 continue;
3509
3510 if (encoder && connector->status == connector_status_connected) {
3511 intel_dp = enc_to_intel_dp(&encoder->base);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003512 status = kstrtoint(input_buffer, 10, &val);
3513 if (status < 0)
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003514 break;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003515 DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3516 /* To prevent erroneous activation of the compliance
3517 * testing code, only accept an actual value of 1 here
3518 */
3519 if (val == 1)
Manasi Navarec1617ab2016-12-09 16:22:50 -08003520 intel_dp->compliance.test_active = 1;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003521 else
Manasi Navarec1617ab2016-12-09 16:22:50 -08003522 intel_dp->compliance.test_active = 0;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003523 }
3524 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003525 drm_connector_list_iter_end(&conn_iter);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003526 kfree(input_buffer);
3527 if (status < 0)
3528 return status;
3529
3530 *offp += len;
3531 return len;
3532}
3533
3534static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3535{
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003536 struct drm_i915_private *dev_priv = m->private;
3537 struct drm_device *dev = &dev_priv->drm;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003538 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003539 struct drm_connector_list_iter conn_iter;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003540 struct intel_dp *intel_dp;
3541
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003542 drm_connector_list_iter_begin(dev, &conn_iter);
3543 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003544 struct intel_encoder *encoder;
3545
Todd Previteeb3394fa2015-04-18 00:04:19 -07003546 if (connector->connector_type !=
3547 DRM_MODE_CONNECTOR_DisplayPort)
3548 continue;
3549
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003550 encoder = to_intel_encoder(connector->encoder);
3551 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3552 continue;
3553
3554 if (encoder && connector->status == connector_status_connected) {
3555 intel_dp = enc_to_intel_dp(&encoder->base);
Manasi Navarec1617ab2016-12-09 16:22:50 -08003556 if (intel_dp->compliance.test_active)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003557 seq_puts(m, "1");
3558 else
3559 seq_puts(m, "0");
3560 } else
3561 seq_puts(m, "0");
3562 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003563 drm_connector_list_iter_end(&conn_iter);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003564
3565 return 0;
3566}
3567
3568static int i915_displayport_test_active_open(struct inode *inode,
David Weinehall36cdd012016-08-22 13:59:31 +03003569 struct file *file)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003570{
David Weinehall36cdd012016-08-22 13:59:31 +03003571 return single_open(file, i915_displayport_test_active_show,
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003572 inode->i_private);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003573}
3574
3575static const struct file_operations i915_displayport_test_active_fops = {
3576 .owner = THIS_MODULE,
3577 .open = i915_displayport_test_active_open,
3578 .read = seq_read,
3579 .llseek = seq_lseek,
3580 .release = single_release,
3581 .write = i915_displayport_test_active_write
3582};
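/*
 * Usage sketch for the entry backed by these fops: writing "1" arms the
 * DP compliance test_active flag on every connected DP connector, while
 * writing any other integer clears it; reads report "1" or "0" for each
 * connector in turn.
 */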
3583
3584static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3585{
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003586 struct drm_i915_private *dev_priv = m->private;
3587 struct drm_device *dev = &dev_priv->drm;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003588 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003589 struct drm_connector_list_iter conn_iter;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003590 struct intel_dp *intel_dp;
3591
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003592 drm_connector_list_iter_begin(dev, &conn_iter);
3593 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003594 struct intel_encoder *encoder;
3595
Todd Previteeb3394fa2015-04-18 00:04:19 -07003596 if (connector->connector_type !=
3597 DRM_MODE_CONNECTOR_DisplayPort)
3598 continue;
3599
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003600 encoder = to_intel_encoder(connector->encoder);
3601 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3602 continue;
3603
3604 if (encoder && connector->status == connector_status_connected) {
3605 intel_dp = enc_to_intel_dp(&encoder->base);
Manasi Navareb48a5ba2017-01-20 19:09:28 -08003606 if (intel_dp->compliance.test_type ==
3607 DP_TEST_LINK_EDID_READ)
3608 seq_printf(m, "%lx",
3609 intel_dp->compliance.test_data.edid);
Manasi Navare611032b2017-01-24 08:21:49 -08003610 else if (intel_dp->compliance.test_type ==
3611 DP_TEST_LINK_VIDEO_PATTERN) {
3612 seq_printf(m, "hdisplay: %d\n",
3613 intel_dp->compliance.test_data.hdisplay);
3614 seq_printf(m, "vdisplay: %d\n",
3615 intel_dp->compliance.test_data.vdisplay);
3616 seq_printf(m, "bpc: %u\n",
3617 intel_dp->compliance.test_data.bpc);
3618 }
Todd Previteeb3394fa2015-04-18 00:04:19 -07003619 } else
3620 seq_puts(m, "0");
3621 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003622 drm_connector_list_iter_end(&conn_iter);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003623
3624 return 0;
3625}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003626DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003627
3628static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3629{
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003630 struct drm_i915_private *dev_priv = m->private;
3631 struct drm_device *dev = &dev_priv->drm;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003632 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003633 struct drm_connector_list_iter conn_iter;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003634 struct intel_dp *intel_dp;
3635
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003636 drm_connector_list_iter_begin(dev, &conn_iter);
3637 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003638 struct intel_encoder *encoder;
3639
Todd Previteeb3394fa2015-04-18 00:04:19 -07003640 if (connector->connector_type !=
3641 DRM_MODE_CONNECTOR_DisplayPort)
3642 continue;
3643
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003644 encoder = to_intel_encoder(connector->encoder);
3645 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3646 continue;
3647
3648 if (encoder && connector->status == connector_status_connected) {
3649 intel_dp = enc_to_intel_dp(&encoder->base);
Manasi Navarec1617ab2016-12-09 16:22:50 -08003650 seq_printf(m, "%02lx", intel_dp->compliance.test_type);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003651 } else
3652 seq_puts(m, "0");
3653 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003654 drm_connector_list_iter_end(&conn_iter);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003655
3656 return 0;
3657}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003658DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003659
Jani Nikulae5315212019-01-16 11:15:23 +02003660static void wm_latency_show(struct seq_file *m, const u16 wm[8])
Ville Syrjälä369a1342014-01-22 14:36:08 +02003661{
David Weinehall36cdd012016-08-22 13:59:31 +03003662 struct drm_i915_private *dev_priv = m->private;
3663 struct drm_device *dev = &dev_priv->drm;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003664 int level;
Ville Syrjäläde38b952015-06-24 22:00:09 +03003665 int num_levels;
3666
David Weinehall36cdd012016-08-22 13:59:31 +03003667 if (IS_CHERRYVIEW(dev_priv))
Ville Syrjäläde38b952015-06-24 22:00:09 +03003668 num_levels = 3;
David Weinehall36cdd012016-08-22 13:59:31 +03003669 else if (IS_VALLEYVIEW(dev_priv))
Ville Syrjäläde38b952015-06-24 22:00:09 +03003670 num_levels = 1;
Ville Syrjälä04548cb2017-04-21 21:14:29 +03003671 else if (IS_G4X(dev_priv))
3672 num_levels = 3;
Ville Syrjäläde38b952015-06-24 22:00:09 +03003673 else
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01003674 num_levels = ilk_wm_max_level(dev_priv) + 1;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003675
3676 drm_modeset_lock_all(dev);
3677
3678 for (level = 0; level < num_levels; level++) {
3679 unsigned int latency = wm[level];
3680
Damien Lespiau97e94b22014-11-04 17:06:50 +00003681 /*
 3682 * - WM1+ latency values are in 0.5us units
Ville Syrjäläde38b952015-06-24 22:00:09 +03003683 * - latencies are in us on gen9+/vlv/chv/g4x
Damien Lespiau97e94b22014-11-04 17:06:50 +00003684 */
Ville Syrjälä04548cb2017-04-21 21:14:29 +03003685 if (INTEL_GEN(dev_priv) >= 9 ||
3686 IS_VALLEYVIEW(dev_priv) ||
3687 IS_CHERRYVIEW(dev_priv) ||
3688 IS_G4X(dev_priv))
Damien Lespiau97e94b22014-11-04 17:06:50 +00003689 latency *= 10;
3690 else if (level > 0)
Ville Syrjälä369a1342014-01-22 14:36:08 +02003691 latency *= 5;
3692
3693 seq_printf(m, "WM%d %u (%u.%u usec)\n",
Damien Lespiau97e94b22014-11-04 17:06:50 +00003694 level, wm[level], latency / 10, latency % 10);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003695 }
3696
3697 drm_modeset_unlock_all(dev);
3698}
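/*
 * Worked example of the scaling above: a raw WM1 value of 6 on an
 * ILK-class part is stored in 0.5us units, so it is scaled by 5 to 30
 * tenths of a microsecond and printed as "WM1 6 (3.0 usec)".
 */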
3699
3700static int pri_wm_latency_show(struct seq_file *m, void *data)
3701{
David Weinehall36cdd012016-08-22 13:59:31 +03003702 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003703 const u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003704
David Weinehall36cdd012016-08-22 13:59:31 +03003705 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003706 latencies = dev_priv->wm.skl_latency;
3707 else
David Weinehall36cdd012016-08-22 13:59:31 +03003708 latencies = dev_priv->wm.pri_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003709
3710 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003711
3712 return 0;
3713}
3714
3715static int spr_wm_latency_show(struct seq_file *m, void *data)
3716{
David Weinehall36cdd012016-08-22 13:59:31 +03003717 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003718 const u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003719
David Weinehall36cdd012016-08-22 13:59:31 +03003720 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003721 latencies = dev_priv->wm.skl_latency;
3722 else
David Weinehall36cdd012016-08-22 13:59:31 +03003723 latencies = dev_priv->wm.spr_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003724
3725 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003726
3727 return 0;
3728}
3729
3730static int cur_wm_latency_show(struct seq_file *m, void *data)
3731{
David Weinehall36cdd012016-08-22 13:59:31 +03003732 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003733 const u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003734
David Weinehall36cdd012016-08-22 13:59:31 +03003735 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003736 latencies = dev_priv->wm.skl_latency;
3737 else
David Weinehall36cdd012016-08-22 13:59:31 +03003738 latencies = dev_priv->wm.cur_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003739
3740 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003741
3742 return 0;
3743}
3744
3745static int pri_wm_latency_open(struct inode *inode, struct file *file)
3746{
David Weinehall36cdd012016-08-22 13:59:31 +03003747 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003748
Ville Syrjälä04548cb2017-04-21 21:14:29 +03003749 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003750 return -ENODEV;
3751
David Weinehall36cdd012016-08-22 13:59:31 +03003752 return single_open(file, pri_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003753}
3754
3755static int spr_wm_latency_open(struct inode *inode, struct file *file)
3756{
David Weinehall36cdd012016-08-22 13:59:31 +03003757 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003758
David Weinehall36cdd012016-08-22 13:59:31 +03003759 if (HAS_GMCH_DISPLAY(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003760 return -ENODEV;
3761
David Weinehall36cdd012016-08-22 13:59:31 +03003762 return single_open(file, spr_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003763}
3764
3765static int cur_wm_latency_open(struct inode *inode, struct file *file)
3766{
David Weinehall36cdd012016-08-22 13:59:31 +03003767 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003768
David Weinehall36cdd012016-08-22 13:59:31 +03003769 if (HAS_GMCH_DISPLAY(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003770 return -ENODEV;
3771
David Weinehall36cdd012016-08-22 13:59:31 +03003772 return single_open(file, cur_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003773}
3774
3775static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
Jani Nikulae5315212019-01-16 11:15:23 +02003776 size_t len, loff_t *offp, u16 wm[8])
Ville Syrjälä369a1342014-01-22 14:36:08 +02003777{
3778 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003779 struct drm_i915_private *dev_priv = m->private;
3780 struct drm_device *dev = &dev_priv->drm;
Jani Nikulae5315212019-01-16 11:15:23 +02003781 u16 new[8] = { 0 };
Ville Syrjäläde38b952015-06-24 22:00:09 +03003782 int num_levels;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003783 int level;
3784 int ret;
3785 char tmp[32];
3786
David Weinehall36cdd012016-08-22 13:59:31 +03003787 if (IS_CHERRYVIEW(dev_priv))
Ville Syrjäläde38b952015-06-24 22:00:09 +03003788 num_levels = 3;
David Weinehall36cdd012016-08-22 13:59:31 +03003789 else if (IS_VALLEYVIEW(dev_priv))
Ville Syrjäläde38b952015-06-24 22:00:09 +03003790 num_levels = 1;
Ville Syrjälä04548cb2017-04-21 21:14:29 +03003791 else if (IS_G4X(dev_priv))
3792 num_levels = 3;
Ville Syrjäläde38b952015-06-24 22:00:09 +03003793 else
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01003794 num_levels = ilk_wm_max_level(dev_priv) + 1;
Ville Syrjäläde38b952015-06-24 22:00:09 +03003795
Ville Syrjälä369a1342014-01-22 14:36:08 +02003796 if (len >= sizeof(tmp))
3797 return -EINVAL;
3798
3799 if (copy_from_user(tmp, ubuf, len))
3800 return -EFAULT;
3801
3802 tmp[len] = '\0';
3803
Damien Lespiau97e94b22014-11-04 17:06:50 +00003804 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3805 &new[0], &new[1], &new[2], &new[3],
3806 &new[4], &new[5], &new[6], &new[7]);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003807 if (ret != num_levels)
3808 return -EINVAL;
3809
3810 drm_modeset_lock_all(dev);
3811
3812 for (level = 0; level < num_levels; level++)
3813 wm[level] = new[level];
3814
3815 drm_modeset_unlock_all(dev);
3816
3817 return len;
3818}
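/*
 * Usage sketch: a write must supply exactly num_levels decimal values
 * (e.g. "12 4 4" on a part with three watermark levels); any other
 * count is rejected with -EINVAL, and the new latencies are applied
 * under the modeset locks.
 */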
3819
3820
3821static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3822 size_t len, loff_t *offp)
3823{
3824 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003825 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003826 u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003827
David Weinehall36cdd012016-08-22 13:59:31 +03003828 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003829 latencies = dev_priv->wm.skl_latency;
3830 else
David Weinehall36cdd012016-08-22 13:59:31 +03003831 latencies = dev_priv->wm.pri_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003832
3833 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003834}
3835
3836static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3837 size_t len, loff_t *offp)
3838{
3839 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003840 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003841 u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003842
David Weinehall36cdd012016-08-22 13:59:31 +03003843 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003844 latencies = dev_priv->wm.skl_latency;
3845 else
David Weinehall36cdd012016-08-22 13:59:31 +03003846 latencies = dev_priv->wm.spr_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003847
3848 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003849}
3850
3851static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3852 size_t len, loff_t *offp)
3853{
3854 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003855 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003856 u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003857
David Weinehall36cdd012016-08-22 13:59:31 +03003858 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003859 latencies = dev_priv->wm.skl_latency;
3860 else
David Weinehall36cdd012016-08-22 13:59:31 +03003861 latencies = dev_priv->wm.cur_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003862
3863 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003864}
3865
3866static const struct file_operations i915_pri_wm_latency_fops = {
3867 .owner = THIS_MODULE,
3868 .open = pri_wm_latency_open,
3869 .read = seq_read,
3870 .llseek = seq_lseek,
3871 .release = single_release,
3872 .write = pri_wm_latency_write
3873};
3874
3875static const struct file_operations i915_spr_wm_latency_fops = {
3876 .owner = THIS_MODULE,
3877 .open = spr_wm_latency_open,
3878 .read = seq_read,
3879 .llseek = seq_lseek,
3880 .release = single_release,
3881 .write = spr_wm_latency_write
3882};
3883
3884static const struct file_operations i915_cur_wm_latency_fops = {
3885 .owner = THIS_MODULE,
3886 .open = cur_wm_latency_open,
3887 .read = seq_read,
3888 .llseek = seq_lseek,
3889 .release = single_release,
3890 .write = cur_wm_latency_write
3891};
3892
Kees Cook647416f2013-03-10 14:10:06 -07003893static int
3894i915_wedged_get(void *data, u64 *val)
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003895{
David Weinehall36cdd012016-08-22 13:59:31 +03003896 struct drm_i915_private *dev_priv = data;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003897
Chris Wilsond98c52c2016-04-13 17:35:05 +01003898 *val = i915_terminally_wedged(&dev_priv->gpu_error);
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003899
Kees Cook647416f2013-03-10 14:10:06 -07003900 return 0;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003901}
3902
Kees Cook647416f2013-03-10 14:10:06 -07003903static int
3904i915_wedged_set(void *data, u64 val)
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003905{
Chris Wilson598b6b52017-03-25 13:47:35 +00003906 struct drm_i915_private *i915 = data;
Imre Deakd46c0512014-04-14 20:24:27 +03003907
Mika Kuoppalab8d24a02015-01-28 17:03:14 +02003908 /*
3909 * There is no safeguard against this debugfs entry colliding
 3910 * with the hangcheck calling the same i915_handle_error() in
 3911 * parallel, causing an explosion. For now we assume that the
 3912 * test harness is responsible enough not to inject gpu hangs
 3913 * while it is writing to 'i915_wedged'.
3914 */
3915
Chris Wilson598b6b52017-03-25 13:47:35 +00003916 if (i915_reset_backoff(&i915->gpu_error))
Mika Kuoppalab8d24a02015-01-28 17:03:14 +02003917 return -EAGAIN;
3918
Chris Wilsonce800752018-03-20 10:04:49 +00003919 i915_handle_error(i915, val, I915_ERROR_CAPTURE,
3920 "Manually set wedged engine mask = %llx", val);
Kees Cook647416f2013-03-10 14:10:06 -07003921 return 0;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003922}
3923
Kees Cook647416f2013-03-10 14:10:06 -07003924DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
3925 i915_wedged_get, i915_wedged_set,
Mika Kuoppala3a3b4f92013-04-12 12:10:05 +03003926 "%llu\n");
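/*
 * Usage sketch: writing a non-zero engine mask declares those engines
 * hung and feeds the mask to i915_handle_error(), while a read reports
 * whether the GPU is already terminally wedged.
 */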
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003927
Kees Cook647416f2013-03-10 14:10:06 -07003928static int
Chris Wilson64486ae2017-03-07 15:59:08 +00003929fault_irq_set(struct drm_i915_private *i915,
3930 unsigned long *irq,
3931 unsigned long val)
3932{
3933 int err;
3934
3935 err = mutex_lock_interruptible(&i915->drm.struct_mutex);
3936 if (err)
3937 return err;
3938
3939 err = i915_gem_wait_for_idle(i915,
3940 I915_WAIT_LOCKED |
Chris Wilsonec625fb2018-07-09 13:20:42 +01003941 I915_WAIT_INTERRUPTIBLE,
3942 MAX_SCHEDULE_TIMEOUT);
Chris Wilson64486ae2017-03-07 15:59:08 +00003943 if (err)
3944 goto err_unlock;
3945
Chris Wilson64486ae2017-03-07 15:59:08 +00003946 *irq = val;
3947 mutex_unlock(&i915->drm.struct_mutex);
3948
3949 /* Flush idle worker to disarm irq */
Chris Wilson7c262402017-10-06 11:40:38 +01003950 drain_delayed_work(&i915->gt.idle_work);
Chris Wilson64486ae2017-03-07 15:59:08 +00003951
3952 return 0;
3953
3954err_unlock:
3955 mutex_unlock(&i915->drm.struct_mutex);
3956 return err;
3957}
3958
3959static int
Chris Wilson094f9a52013-09-25 17:34:55 +01003960i915_ring_missed_irq_get(void *data, u64 *val)
3961{
David Weinehall36cdd012016-08-22 13:59:31 +03003962 struct drm_i915_private *dev_priv = data;
Chris Wilson094f9a52013-09-25 17:34:55 +01003963
3964 *val = dev_priv->gpu_error.missed_irq_rings;
3965 return 0;
3966}
3967
3968static int
3969i915_ring_missed_irq_set(void *data, u64 val)
3970{
Chris Wilson64486ae2017-03-07 15:59:08 +00003971 struct drm_i915_private *i915 = data;
Chris Wilson094f9a52013-09-25 17:34:55 +01003972
Chris Wilson64486ae2017-03-07 15:59:08 +00003973 return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
Chris Wilson094f9a52013-09-25 17:34:55 +01003974}
3975
3976DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
3977 i915_ring_missed_irq_get, i915_ring_missed_irq_set,
3978 "0x%08llx\n");
3979
3980static int
3981i915_ring_test_irq_get(void *data, u64 *val)
3982{
David Weinehall36cdd012016-08-22 13:59:31 +03003983 struct drm_i915_private *dev_priv = data;
Chris Wilson094f9a52013-09-25 17:34:55 +01003984
3985 *val = dev_priv->gpu_error.test_irq_rings;
3986
3987 return 0;
3988}
3989
3990static int
3991i915_ring_test_irq_set(void *data, u64 val)
3992{
Chris Wilson64486ae2017-03-07 15:59:08 +00003993 struct drm_i915_private *i915 = data;
Chris Wilson094f9a52013-09-25 17:34:55 +01003994
Chris Wilson5f521722018-09-07 12:28:51 +01003995 /* GuC keeps the user interrupt permanently enabled for submission */
3996 if (USES_GUC_SUBMISSION(i915))
3997 return -ENODEV;
3998
3999 /*
4000 * From icl, we can no longer individually mask interrupt generation
4001 * from each engine.
4002 */
4003 if (INTEL_GEN(i915) >= 11)
4004 return -ENODEV;
4005
Chris Wilson64486ae2017-03-07 15:59:08 +00004006 val &= INTEL_INFO(i915)->ring_mask;
Chris Wilson094f9a52013-09-25 17:34:55 +01004007 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
Chris Wilson094f9a52013-09-25 17:34:55 +01004008
Chris Wilson64486ae2017-03-07 15:59:08 +00004009 return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
Chris Wilson094f9a52013-09-25 17:34:55 +01004010}
4011
4012DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
4013 i915_ring_test_irq_get, i915_ring_test_irq_set,
4014 "0x%08llx\n");
4015
Chris Wilsonb4a0b322017-10-18 13:16:21 +01004016#define DROP_UNBOUND BIT(0)
4017#define DROP_BOUND BIT(1)
4018#define DROP_RETIRE BIT(2)
4019#define DROP_ACTIVE BIT(3)
4020#define DROP_FREED BIT(4)
4021#define DROP_SHRINK_ALL BIT(5)
4022#define DROP_IDLE BIT(6)
Chris Wilson6b048702018-09-03 09:33:37 +01004023#define DROP_RESET_ACTIVE BIT(7)
4024#define DROP_RESET_SEQNO BIT(8)
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004025#define DROP_ALL (DROP_UNBOUND | \
4026 DROP_BOUND | \
4027 DROP_RETIRE | \
4028 DROP_ACTIVE | \
Chris Wilson8eadc192017-03-08 14:46:22 +00004029 DROP_FREED | \
Chris Wilsonb4a0b322017-10-18 13:16:21 +01004030 DROP_SHRINK_ALL |\
Chris Wilson6b048702018-09-03 09:33:37 +01004031 DROP_IDLE | \
4032 DROP_RESET_ACTIVE | \
4033 DROP_RESET_SEQNO)
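/*
 * Usage sketch (path assumes the usual debugfs mount and card 0): the
 * flags above form a bitmask, so e.g.
 *
 *   echo 0x3 > /sys/kernel/debug/dri/0/i915_drop_caches
 *
 * drops only bound and unbound objects, while 0x1ff (DROP_ALL) applies
 * every flag, including the RESET ones.
 */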
Kees Cook647416f2013-03-10 14:10:06 -07004034static int
4035i915_drop_caches_get(void *data, u64 *val)
Chris Wilsondd624af2013-01-15 12:39:35 +00004036{
Kees Cook647416f2013-03-10 14:10:06 -07004037 *val = DROP_ALL;
Chris Wilsondd624af2013-01-15 12:39:35 +00004038
Kees Cook647416f2013-03-10 14:10:06 -07004039 return 0;
Chris Wilsondd624af2013-01-15 12:39:35 +00004040}
4041
Kees Cook647416f2013-03-10 14:10:06 -07004042static int
4043i915_drop_caches_set(void *data, u64 val)
Chris Wilsondd624af2013-01-15 12:39:35 +00004044{
Chris Wilson6b048702018-09-03 09:33:37 +01004045 struct drm_i915_private *i915 = data;
Chris Wilsona0371212019-01-14 14:21:14 +00004046 intel_wakeref_t wakeref;
Chris Wilson00c26cf2017-05-24 17:26:53 +01004047 int ret = 0;
Chris Wilsondd624af2013-01-15 12:39:35 +00004048
Chris Wilsonb4a0b322017-10-18 13:16:21 +01004049 DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
4050 val, val & DROP_ALL);
Chris Wilsona0371212019-01-14 14:21:14 +00004051 wakeref = intel_runtime_pm_get(i915);
Chris Wilsondd624af2013-01-15 12:39:35 +00004052
Chris Wilson6b048702018-09-03 09:33:37 +01004053 if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915))
4054 i915_gem_set_wedged(i915);
4055
Chris Wilsondd624af2013-01-15 12:39:35 +00004056 /* No need to check and wait for gpu resets; only libdrm auto-restarts
 4057 * ioctls on -EAGAIN. */
Chris Wilson6b048702018-09-03 09:33:37 +01004058 if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
4059 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
Chris Wilsondd624af2013-01-15 12:39:35 +00004060 if (ret)
Joonas Lahtinen198a2a22018-10-18 12:20:25 +03004061 goto out;
Chris Wilsondd624af2013-01-15 12:39:35 +00004062
Chris Wilson00c26cf2017-05-24 17:26:53 +01004063 if (val & DROP_ACTIVE)
Chris Wilson6b048702018-09-03 09:33:37 +01004064 ret = i915_gem_wait_for_idle(i915,
Chris Wilson00c26cf2017-05-24 17:26:53 +01004065 I915_WAIT_INTERRUPTIBLE |
Chris Wilsonec625fb2018-07-09 13:20:42 +01004066 I915_WAIT_LOCKED,
4067 MAX_SCHEDULE_TIMEOUT);
Chris Wilson00c26cf2017-05-24 17:26:53 +01004068
Chris Wilson6b048702018-09-03 09:33:37 +01004069 if (val & DROP_RETIRE)
4070 i915_retire_requests(i915);
4071
4072 mutex_unlock(&i915->drm.struct_mutex);
4073 }
4074
Chris Wilsoneb8d0f52019-01-25 13:22:28 +00004075 if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(&i915->gpu_error))
Chris Wilson6b048702018-09-03 09:33:37 +01004076 i915_handle_error(i915, ALL_ENGINES, 0, NULL);
Chris Wilsondd624af2013-01-15 12:39:35 +00004077
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01004078 fs_reclaim_acquire(GFP_KERNEL);
Chris Wilson21ab4e72014-09-09 11:16:08 +01004079 if (val & DROP_BOUND)
Chris Wilson6b048702018-09-03 09:33:37 +01004080 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
Chris Wilson4ad72b72014-09-03 19:23:37 +01004081
Chris Wilson21ab4e72014-09-09 11:16:08 +01004082 if (val & DROP_UNBOUND)
Chris Wilson6b048702018-09-03 09:33:37 +01004083 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
Chris Wilsondd624af2013-01-15 12:39:35 +00004084
Chris Wilson8eadc192017-03-08 14:46:22 +00004085 if (val & DROP_SHRINK_ALL)
Chris Wilson6b048702018-09-03 09:33:37 +01004086 i915_gem_shrink_all(i915);
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01004087 fs_reclaim_release(GFP_KERNEL);
Chris Wilson8eadc192017-03-08 14:46:22 +00004088
Chris Wilson4dfacb02018-05-31 09:22:43 +01004089 if (val & DROP_IDLE) {
4090 do {
Chris Wilson6b048702018-09-03 09:33:37 +01004091 if (READ_ONCE(i915->gt.active_requests))
4092 flush_delayed_work(&i915->gt.retire_work);
4093 drain_delayed_work(&i915->gt.idle_work);
4094 } while (READ_ONCE(i915->gt.awake));
Chris Wilson4dfacb02018-05-31 09:22:43 +01004095 }
Chris Wilsonb4a0b322017-10-18 13:16:21 +01004096
Chris Wilsonc9c704712018-02-19 22:06:31 +00004097 if (val & DROP_FREED)
Chris Wilson6b048702018-09-03 09:33:37 +01004098 i915_gem_drain_freed_objects(i915);
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004099
Joonas Lahtinen198a2a22018-10-18 12:20:25 +03004100out:
Chris Wilsona0371212019-01-14 14:21:14 +00004101 intel_runtime_pm_put(i915, wakeref);
Chris Wilson9d3eb2c2018-10-15 12:58:56 +01004102
Kees Cook647416f2013-03-10 14:10:06 -07004103 return ret;
Chris Wilsondd624af2013-01-15 12:39:35 +00004104}
4105
Kees Cook647416f2013-03-10 14:10:06 -07004106DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
4107 i915_drop_caches_get, i915_drop_caches_set,
4108 "0x%08llx\n");
Chris Wilsondd624af2013-01-15 12:39:35 +00004109
Kees Cook647416f2013-03-10 14:10:06 -07004110static int
Kees Cook647416f2013-03-10 14:10:06 -07004111i915_cache_sharing_get(void *data, u64 *val)
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004112{
David Weinehall36cdd012016-08-22 13:59:31 +03004113 struct drm_i915_private *dev_priv = data;
Chris Wilsona0371212019-01-14 14:21:14 +00004114 intel_wakeref_t wakeref;
Chris Wilsond4225a52019-01-14 14:21:23 +00004115 u32 snpcr = 0;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004116
Lucas De Marchif3ce44a2018-12-12 10:10:44 -08004117 if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
Daniel Vetter004777c2012-08-09 15:07:01 +02004118 return -ENODEV;
4119
Chris Wilsond4225a52019-01-14 14:21:23 +00004120 with_intel_runtime_pm(dev_priv, wakeref)
4121 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004122
Kees Cook647416f2013-03-10 14:10:06 -07004123 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004124
Kees Cook647416f2013-03-10 14:10:06 -07004125 return 0;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004126}
4127
Kees Cook647416f2013-03-10 14:10:06 -07004128static int
4129i915_cache_sharing_set(void *data, u64 val)
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004130{
David Weinehall36cdd012016-08-22 13:59:31 +03004131 struct drm_i915_private *dev_priv = data;
Chris Wilsona0371212019-01-14 14:21:14 +00004132 intel_wakeref_t wakeref;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004133
Lucas De Marchif3ce44a2018-12-12 10:10:44 -08004134 if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
Daniel Vetter004777c2012-08-09 15:07:01 +02004135 return -ENODEV;
4136
Kees Cook647416f2013-03-10 14:10:06 -07004137 if (val > 3)
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004138 return -EINVAL;
4139
Kees Cook647416f2013-03-10 14:10:06 -07004140 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
Chris Wilsond4225a52019-01-14 14:21:23 +00004141 with_intel_runtime_pm(dev_priv, wakeref) {
4142 u32 snpcr;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004143
Chris Wilsond4225a52019-01-14 14:21:23 +00004144 /* Update the cache sharing policy here as well */
4145 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4146 snpcr &= ~GEN6_MBC_SNPCR_MASK;
4147 snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
4148 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
4149 }
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004150
Kees Cook647416f2013-03-10 14:10:06 -07004151 return 0;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004152}
4153
Kees Cook647416f2013-03-10 14:10:06 -07004154DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
4155 i915_cache_sharing_get, i915_cache_sharing_set,
4156 "%llu\n");
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004157
David Weinehall36cdd012016-08-22 13:59:31 +03004158static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
Imre Deak915490d2016-08-31 19:13:01 +03004159 struct sseu_dev_info *sseu)
Jeff McGee5d395252015-04-03 18:13:17 -07004160{
Chris Wilson7aa0b142018-03-13 00:40:54 +00004161#define SS_MAX 2
4162 const int ss_max = SS_MAX;
4163 u32 sig1[SS_MAX], sig2[SS_MAX];
Jeff McGee5d395252015-04-03 18:13:17 -07004164 int ss;
Jeff McGee5d395252015-04-03 18:13:17 -07004165
4166 sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
4167 sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
4168 sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
4169 sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
4170
4171 for (ss = 0; ss < ss_max; ss++) {
4172 unsigned int eu_cnt;
4173
4174 if (sig1[ss] & CHV_SS_PG_ENABLE)
4175 /* skip disabled subslice */
4176 continue;
4177
Imre Deakf08a0c92016-08-31 19:13:04 +03004178 sseu->slice_mask = BIT(0);
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004179 sseu->subslice_mask[0] |= BIT(ss);
Jeff McGee5d395252015-04-03 18:13:17 -07004180 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
4181 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
4182 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
4183 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
Imre Deak915490d2016-08-31 19:13:01 +03004184 sseu->eu_total += eu_cnt;
4185 sseu->eu_per_subslice = max_t(unsigned int,
4186 sseu->eu_per_subslice, eu_cnt);
Jeff McGee5d395252015-04-03 18:13:17 -07004187 }
Chris Wilson7aa0b142018-03-13 00:40:54 +00004188#undef SS_MAX
Jeff McGee5d395252015-04-03 18:13:17 -07004189}
4190
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004191static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
4192 struct sseu_dev_info *sseu)
4193{
Chris Wilsonc7fb3c62018-03-13 11:31:49 +00004194#define SS_MAX 6
Jani Nikula02584042018-12-31 16:56:41 +02004195 const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
Chris Wilsonc7fb3c62018-03-13 11:31:49 +00004196 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004197 int s, ss;
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004198
Lionel Landwerlinb3e7f862018-03-06 12:28:53 +00004199 for (s = 0; s < info->sseu.max_slices; s++) {
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004200 /*
 4201 * FIXME: the valid SS mask respects the spec and reads
Alexandre Belloni3c64ea82018-11-20 16:14:15 +01004202 * only the valid bits of those registers, excluding reserved
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004203 * ones, although this seems wrong because it would leave many
 4204 * subslices without an ACK.
4205 */
4206 s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
4207 GEN10_PGCTL_VALID_SS_MASK(s);
4208 eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
4209 eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
4210 }
4211
4212 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4213 GEN9_PGCTL_SSA_EU19_ACK |
4214 GEN9_PGCTL_SSA_EU210_ACK |
4215 GEN9_PGCTL_SSA_EU311_ACK;
4216 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4217 GEN9_PGCTL_SSB_EU19_ACK |
4218 GEN9_PGCTL_SSB_EU210_ACK |
4219 GEN9_PGCTL_SSB_EU311_ACK;
4220
Lionel Landwerlinb3e7f862018-03-06 12:28:53 +00004221 for (s = 0; s < info->sseu.max_slices; s++) {
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004222 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4223 /* skip disabled slice */
4224 continue;
4225
4226 sseu->slice_mask |= BIT(s);
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004227 sseu->subslice_mask[s] = info->sseu.subslice_mask[s];
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004228
Lionel Landwerlinb3e7f862018-03-06 12:28:53 +00004229 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004230 unsigned int eu_cnt;
4231
4232 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4233 /* skip disabled subslice */
4234 continue;
4235
4236 eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
4237 eu_mask[ss % 2]);
4238 sseu->eu_total += eu_cnt;
4239 sseu->eu_per_subslice = max_t(unsigned int,
4240 sseu->eu_per_subslice,
4241 eu_cnt);
4242 }
4243 }
Chris Wilsonc7fb3c62018-03-13 11:31:49 +00004244#undef SS_MAX
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004245}
4246
David Weinehall36cdd012016-08-22 13:59:31 +03004247static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
Imre Deak915490d2016-08-31 19:13:01 +03004248 struct sseu_dev_info *sseu)
Jeff McGee5d395252015-04-03 18:13:17 -07004249{
Chris Wilsonc7fb3c62018-03-13 11:31:49 +00004250#define SS_MAX 3
Jani Nikula02584042018-12-31 16:56:41 +02004251 const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
Chris Wilsonc7fb3c62018-03-13 11:31:49 +00004252 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
Jeff McGee5d395252015-04-03 18:13:17 -07004253 int s, ss;
Jeff McGee5d395252015-04-03 18:13:17 -07004254
Lionel Landwerlinb3e7f862018-03-06 12:28:53 +00004255 for (s = 0; s < info->sseu.max_slices; s++) {
Jeff McGee1c046bc2015-04-03 18:13:18 -07004256 s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
4257 eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
4258 eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
4259 }
4260
Jeff McGee5d395252015-04-03 18:13:17 -07004261 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4262 GEN9_PGCTL_SSA_EU19_ACK |
4263 GEN9_PGCTL_SSA_EU210_ACK |
4264 GEN9_PGCTL_SSA_EU311_ACK;
4265 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4266 GEN9_PGCTL_SSB_EU19_ACK |
4267 GEN9_PGCTL_SSB_EU210_ACK |
4268 GEN9_PGCTL_SSB_EU311_ACK;
4269
Lionel Landwerlinb3e7f862018-03-06 12:28:53 +00004270 for (s = 0; s < info->sseu.max_slices; s++) {
Jeff McGee5d395252015-04-03 18:13:17 -07004271 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4272 /* skip disabled slice */
4273 continue;
4274
Imre Deakf08a0c92016-08-31 19:13:04 +03004275 sseu->slice_mask |= BIT(s);
Jeff McGee1c046bc2015-04-03 18:13:18 -07004276
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004277 if (IS_GEN9_BC(dev_priv))
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004278 sseu->subslice_mask[s] =
Jani Nikula02584042018-12-31 16:56:41 +02004279 RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
Jeff McGee1c046bc2015-04-03 18:13:18 -07004280
Lionel Landwerlinb3e7f862018-03-06 12:28:53 +00004281 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
Jeff McGee5d395252015-04-03 18:13:17 -07004282 unsigned int eu_cnt;
4283
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02004284 if (IS_GEN9_LP(dev_priv)) {
Imre Deak57ec1712016-08-31 19:13:05 +03004285 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4286 /* skip disabled subslice */
4287 continue;
Jeff McGee1c046bc2015-04-03 18:13:18 -07004288
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004289 sseu->subslice_mask[s] |= BIT(ss);
Imre Deak57ec1712016-08-31 19:13:05 +03004290 }
Jeff McGee1c046bc2015-04-03 18:13:18 -07004291
Jeff McGee5d395252015-04-03 18:13:17 -07004292 eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
4293 eu_mask[ss%2]);
Imre Deak915490d2016-08-31 19:13:01 +03004294 sseu->eu_total += eu_cnt;
4295 sseu->eu_per_subslice = max_t(unsigned int,
4296 sseu->eu_per_subslice,
4297 eu_cnt);
Jeff McGee5d395252015-04-03 18:13:17 -07004298 }
4299 }
Chris Wilsonc7fb3c62018-03-13 11:31:49 +00004300#undef SS_MAX
Jeff McGee5d395252015-04-03 18:13:17 -07004301}
4302
David Weinehall36cdd012016-08-22 13:59:31 +03004303static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
Imre Deak915490d2016-08-31 19:13:01 +03004304 struct sseu_dev_info *sseu)
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004305{
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004306 u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
David Weinehall36cdd012016-08-22 13:59:31 +03004307 int s;
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004308
Imre Deakf08a0c92016-08-31 19:13:04 +03004309 sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004310
Imre Deakf08a0c92016-08-31 19:13:04 +03004311 if (sseu->slice_mask) {
Imre Deak43b67992016-08-31 19:13:02 +03004312 sseu->eu_per_subslice =
Jani Nikula02584042018-12-31 16:56:41 +02004313 RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004314 for (s = 0; s < fls(sseu->slice_mask); s++) {
4315 sseu->subslice_mask[s] =
Jani Nikula02584042018-12-31 16:56:41 +02004316 RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004317 }
Imre Deak57ec1712016-08-31 19:13:05 +03004318 sseu->eu_total = sseu->eu_per_subslice *
4319 sseu_subslice_total(sseu);
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004320
 4321 /* subtract fused-off EU(s) from enabled slice(s) */
Imre Deak795b38b2016-08-31 19:13:07 +03004322 for (s = 0; s < fls(sseu->slice_mask); s++) {
Imre Deak43b67992016-08-31 19:13:02 +03004323 u8 subslice_7eu =
Jani Nikula02584042018-12-31 16:56:41 +02004324 RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004325
Imre Deak915490d2016-08-31 19:13:01 +03004326 sseu->eu_total -= hweight8(subslice_7eu);
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004327 }
4328 }
4329}
4330
Imre Deak615d8902016-08-31 19:13:03 +03004331static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
4332 const struct sseu_dev_info *sseu)
4333{
4334 struct drm_i915_private *dev_priv = node_to_i915(m->private);
4335 const char *type = is_available_info ? "Available" : "Enabled";
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004336 int s;
Imre Deak615d8902016-08-31 19:13:03 +03004337
Imre Deakc67ba532016-08-31 19:13:06 +03004338 seq_printf(m, " %s Slice Mask: %04x\n", type,
4339 sseu->slice_mask);
Imre Deak615d8902016-08-31 19:13:03 +03004340 seq_printf(m, " %s Slice Total: %u\n", type,
Imre Deakf08a0c92016-08-31 19:13:04 +03004341 hweight8(sseu->slice_mask));
Imre Deak615d8902016-08-31 19:13:03 +03004342 seq_printf(m, " %s Subslice Total: %u\n", type,
Imre Deak57ec1712016-08-31 19:13:05 +03004343 sseu_subslice_total(sseu));
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004344 for (s = 0; s < fls(sseu->slice_mask); s++) {
4345 seq_printf(m, " %s Slice%i subslices: %u\n", type,
4346 s, hweight8(sseu->subslice_mask[s]));
4347 }
Imre Deak615d8902016-08-31 19:13:03 +03004348 seq_printf(m, " %s EU Total: %u\n", type,
4349 sseu->eu_total);
4350 seq_printf(m, " %s EU Per Subslice: %u\n", type,
4351 sseu->eu_per_subslice);
4352
4353 if (!is_available_info)
4354 return;
4355
4356 seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
4357 if (HAS_POOLED_EU(dev_priv))
4358 seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);
4359
4360 seq_printf(m, " Has Slice Power Gating: %s\n",
4361 yesno(sseu->has_slice_pg));
4362 seq_printf(m, " Has Subslice Power Gating: %s\n",
4363 yesno(sseu->has_subslice_pg));
4364 seq_printf(m, " Has EU Power Gating: %s\n",
4365 yesno(sseu->has_eu_pg));
4366}
4367
Jeff McGee38732182015-02-13 10:27:54 -06004368static int i915_sseu_status(struct seq_file *m, void *unused)
4369{
David Weinehall36cdd012016-08-22 13:59:31 +03004370 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Imre Deak915490d2016-08-31 19:13:01 +03004371 struct sseu_dev_info sseu;
Chris Wilsona0371212019-01-14 14:21:14 +00004372 intel_wakeref_t wakeref;
Jeff McGee38732182015-02-13 10:27:54 -06004373
David Weinehall36cdd012016-08-22 13:59:31 +03004374 if (INTEL_GEN(dev_priv) < 8)
Jeff McGee38732182015-02-13 10:27:54 -06004375 return -ENODEV;
4376
4377 seq_puts(m, "SSEU Device Info\n");
Jani Nikula02584042018-12-31 16:56:41 +02004378 i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);
Jeff McGee38732182015-02-13 10:27:54 -06004379
Jeff McGee7f992ab2015-02-13 10:27:55 -06004380 seq_puts(m, "SSEU Device Status\n");
Imre Deak915490d2016-08-31 19:13:01 +03004381 memset(&sseu, 0, sizeof(sseu));
Jani Nikula02584042018-12-31 16:56:41 +02004382 sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
4383 sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004384 sseu.max_eus_per_subslice =
Jani Nikula02584042018-12-31 16:56:41 +02004385 RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;
David Weinehall238010e2016-08-01 17:33:27 +03004386
Chris Wilsond4225a52019-01-14 14:21:23 +00004387 with_intel_runtime_pm(dev_priv, wakeref) {
4388 if (IS_CHERRYVIEW(dev_priv))
4389 cherryview_sseu_device_status(dev_priv, &sseu);
4390 else if (IS_BROADWELL(dev_priv))
4391 broadwell_sseu_device_status(dev_priv, &sseu);
4392 else if (IS_GEN(dev_priv, 9))
4393 gen9_sseu_device_status(dev_priv, &sseu);
4394 else if (INTEL_GEN(dev_priv) >= 10)
4395 gen10_sseu_device_status(dev_priv, &sseu);
Jeff McGee7f992ab2015-02-13 10:27:55 -06004396 }
David Weinehall238010e2016-08-01 17:33:27 +03004397
Imre Deak615d8902016-08-31 19:13:03 +03004398 i915_print_sseu_info(m, false, &sseu);
Jeff McGee7f992ab2015-02-13 10:27:55 -06004399
Jeff McGee38732182015-02-13 10:27:54 -06004400 return 0;
4401}
4402
Ben Widawsky6d794d42011-04-25 11:25:56 -07004403static int i915_forcewake_open(struct inode *inode, struct file *file)
4404{
Chris Wilsond7a133d2017-09-07 14:44:41 +01004405 struct drm_i915_private *i915 = inode->i_private;
Ben Widawsky6d794d42011-04-25 11:25:56 -07004406
Chris Wilsond7a133d2017-09-07 14:44:41 +01004407 if (INTEL_GEN(i915) < 6)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004408 return 0;
4409
Tvrtko Ursulin6ddbb12e2019-01-17 14:48:31 +00004410 file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915);
Chris Wilsond7a133d2017-09-07 14:44:41 +01004411 intel_uncore_forcewake_user_get(i915);
Ben Widawsky6d794d42011-04-25 11:25:56 -07004412
4413 return 0;
4414}
4415
Ben Widawskyc43b5632012-04-16 14:07:40 -07004416static int i915_forcewake_release(struct inode *inode, struct file *file)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004417{
Chris Wilsond7a133d2017-09-07 14:44:41 +01004418 struct drm_i915_private *i915 = inode->i_private;
Ben Widawsky6d794d42011-04-25 11:25:56 -07004419
Chris Wilsond7a133d2017-09-07 14:44:41 +01004420 if (INTEL_GEN(i915) < 6)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004421 return 0;
4422
Chris Wilsond7a133d2017-09-07 14:44:41 +01004423 intel_uncore_forcewake_user_put(i915);
Tvrtko Ursulin6ddbb12e2019-01-17 14:48:31 +00004424 intel_runtime_pm_put(i915,
4425 (intel_wakeref_t)(uintptr_t)file->private_data);
Ben Widawsky6d794d42011-04-25 11:25:56 -07004426
4427 return 0;
4428}
4429
4430static const struct file_operations i915_forcewake_fops = {
4431 .owner = THIS_MODULE,
4432 .open = i915_forcewake_open,
4433 .release = i915_forcewake_release,
4434};
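/*
 * Usage sketch: keeping this file open holds a runtime-PM wakeref plus
 * userspace forcewake, so the hardware stays awake for register poking
 * until the file is closed and both references are released.
 */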
4435
Lyude317eaa92017-02-03 21:18:25 -05004436static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4437{
4438 struct drm_i915_private *dev_priv = m->private;
4439 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4440
Lyude Paul6fc5d782018-11-20 19:37:17 -05004441 /* Synchronize with everything first in case there's been an HPD
4442 * storm, but we haven't finished handling it in the kernel yet
4443 */
4444 synchronize_irq(dev_priv->drm.irq);
4445 flush_work(&dev_priv->hotplug.dig_port_work);
4446 flush_work(&dev_priv->hotplug.hotplug_work);
4447
Lyude317eaa92017-02-03 21:18:25 -05004448 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4449 seq_printf(m, "Detected: %s\n",
4450 yesno(delayed_work_pending(&hotplug->reenable_work)));
4451
4452 return 0;
4453}
4454
4455static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4456 const char __user *ubuf, size_t len,
4457 loff_t *offp)
4458{
4459 struct seq_file *m = file->private_data;
4460 struct drm_i915_private *dev_priv = m->private;
4461 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4462 unsigned int new_threshold;
4463 int i;
4464 char *newline;
4465 char tmp[16];
4466
4467 if (len >= sizeof(tmp))
4468 return -EINVAL;
4469
4470 if (copy_from_user(tmp, ubuf, len))
4471 return -EFAULT;
4472
4473 tmp[len] = '\0';
4474
4475 /* Strip newline, if any */
4476 newline = strchr(tmp, '\n');
4477 if (newline)
4478 *newline = '\0';
4479
4480 if (strcmp(tmp, "reset") == 0)
4481 new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4482 else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4483 return -EINVAL;
4484
4485 if (new_threshold > 0)
4486 DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4487 new_threshold);
4488 else
4489 DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4490
4491 spin_lock_irq(&dev_priv->irq_lock);
4492 hotplug->hpd_storm_threshold = new_threshold;
4493 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4494 for_each_hpd_pin(i)
4495 hotplug->stats[i].count = 0;
4496 spin_unlock_irq(&dev_priv->irq_lock);
4497
4498 /* Re-enable hpd immediately if we were in an irq storm */
4499 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4500
4501 return len;
4502}
4503
4504static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
4505{
4506 return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
4507}
4508
4509static const struct file_operations i915_hpd_storm_ctl_fops = {
4510 .owner = THIS_MODULE,
4511 .open = i915_hpd_storm_ctl_open,
4512 .read = seq_read,
4513 .llseek = seq_lseek,
4514 .release = single_release,
4515 .write = i915_hpd_storm_ctl_write
4516};
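/*
 * Usage sketch: write a decimal threshold to arm HPD storm detection at
 * that many interrupts, "reset" to restore HPD_STORM_DEFAULT_THRESHOLD,
 * or 0 to disable detection entirely; reads report the threshold and
 * whether a storm is currently being handled.
 */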
4517
Lyude Paul9a64c652018-11-06 16:30:16 -05004518static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
4519{
4520 struct drm_i915_private *dev_priv = m->private;
4521
4522 seq_printf(m, "Enabled: %s\n",
4523 yesno(dev_priv->hotplug.hpd_short_storm_enabled));
4524
4525 return 0;
4526}
4527
4528static int
4529i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
4530{
4531 return single_open(file, i915_hpd_short_storm_ctl_show,
4532 inode->i_private);
4533}
4534
4535static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
4536 const char __user *ubuf,
4537 size_t len, loff_t *offp)
4538{
4539 struct seq_file *m = file->private_data;
4540 struct drm_i915_private *dev_priv = m->private;
4541 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4542 char *newline;
4543 char tmp[16];
4544 int i;
4545 bool new_state;
4546
4547 if (len >= sizeof(tmp))
4548 return -EINVAL;
4549
4550 if (copy_from_user(tmp, ubuf, len))
4551 return -EFAULT;
4552
4553 tmp[len] = '\0';
4554
4555 /* Strip newline, if any */
4556 newline = strchr(tmp, '\n');
4557 if (newline)
4558 *newline = '\0';
4559
4560 /* Reset to the "default" state for this system */
4561 if (strcmp(tmp, "reset") == 0)
4562 new_state = !HAS_DP_MST(dev_priv);
4563 else if (kstrtobool(tmp, &new_state) != 0)
4564 return -EINVAL;
4565
4566 DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
4567 new_state ? "En" : "Dis");
4568
4569 spin_lock_irq(&dev_priv->irq_lock);
4570 hotplug->hpd_short_storm_enabled = new_state;
4571 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4572 for_each_hpd_pin(i)
4573 hotplug->stats[i].count = 0;
4574 spin_unlock_irq(&dev_priv->irq_lock);
4575
4576 /* Re-enable hpd immediately if we were in an irq storm */
4577 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4578
4579 return len;
4580}
4581
4582static const struct file_operations i915_hpd_short_storm_ctl_fops = {
4583 .owner = THIS_MODULE,
4584 .open = i915_hpd_short_storm_ctl_open,
4585 .read = seq_read,
4586 .llseek = seq_lseek,
4587 .release = single_release,
4588 .write = i915_hpd_short_storm_ctl_write,
4589};
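/*
 * Usage sketch: accepts a boolean to force short-pulse storm detection
 * on or off, or "reset" to restore the per-system default (enabled only
 * when the platform lacks DP-MST).
 */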

static int i915_drrs_ctl_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 7)
		return -ENODEV;

	for_each_intel_crtc(dev, crtc) {
		struct drm_connector_list_iter conn_iter;
		struct intel_crtc_state *crtc_state;
		struct drm_connector *connector;
		struct drm_crtc_commit *commit;
		int ret;

		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		if (!crtc_state->base.active ||
		    !crtc_state->has_drrs)
			goto out;

		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				goto out;
		}

		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct intel_encoder *encoder;
			struct intel_dp *intel_dp;

			if (!(crtc_state->base.connector_mask &
			      drm_connector_mask(connector)))
				continue;

			encoder = intel_attached_encoder(connector);
			if (encoder->type != INTEL_OUTPUT_EDP)
				continue;

			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
					 val ? "en" : "dis", val);

			intel_dp = enc_to_intel_dp(&encoder->base);
			if (val)
				intel_edp_drrs_enable(intel_dp,
						      crtc_state);
			else
				intel_edp_drrs_disable(intel_dp,
						       crtc_state);
		}
		drm_connector_list_iter_end(&conn_iter);

out:
		drm_modeset_unlock(&crtc->base.mutex);
		if (ret)
			return ret;
	}

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
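
/*
 * Illustrative usage only (path assumptions as above): a non-zero value
 * manually enables DRRS on every active eDP pipe that supports it, zero
 * disables it again:
 *
 *	echo 1 > /sys/kernel/debug/dri/0/i915_drrs_ctl
 *	echo 0 > /sys/kernel/debug/dri/0/i915_drrs_ctl
 *
 * The NULL "get" callback above means the attribute is write-only.
 */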

static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->base.active) {
			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
				      pipe_name(intel_crtc->pipe));

			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}

		drm_modeset_unlock(&intel_crtc->base.mutex);

		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}

static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};
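
/*
 * Illustrative usage only: underrun reporting is disarmed after the
 * first underrun on a pipe to avoid log spam; writing any
 * kstrtobool() truthy value re-arms it on all active pipes (a falsy
 * value is accepted and ignored):
 *
 *	echo 1 > /sys/kernel/debug/dri/0/i915_fifo_underrun_reset
 */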

static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_reset_info", i915_reset_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};

int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	struct dentry *ent;
	int i;

	ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
				  minor->debugfs_root, to_i915(minor->dev),
				  &i915_forcewake_fops);
	if (!ent)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		ent = debugfs_create_file(i915_debugfs_files[i].name,
					  S_IRUGO | S_IWUSR,
					  minor->debugfs_root,
					  to_i915(minor->dev),
					  i915_debugfs_files[i].fops);
		if (!ent)
			return -ENOMEM;
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}
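
/*
 * A hypothetical caller sketch, not the driver's actual registration
 * code: registration must happen once per device, after the DRM minor
 * has been registered so that minor->debugfs_root exists.
 *
 *	if (i915_debugfs_register(dev_priv))
 *		DRM_NOTE("debugfs registration failed\n");
 */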

struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};

static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
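
/*
 * How the table above resolves to a dump length; this mirrors the
 * "b->end ? b->end - b->offset + 1 : (b->size ?: 1)" expression in
 * i915_dpcd_show() below:
 *
 *	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS } -> end - offset + 1 bytes
 *	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 }   -> 16 bytes
 *	{ .offset = DP_SET_POWER }                       -> 1 byte
 */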

static int i915_dpcd_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	u8 buf[16];
	ssize_t err;
	int i;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
		const struct dpcd_block *b = &i915_dpcd_debug[i];
		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);

		if (b->edp &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		/* low tech for now */
		if (WARN_ON(size > sizeof(buf)))
			continue;

		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
		if (err < 0)
			seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
		else
			seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
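
/*
 * Illustrative output only (the byte values are made up); the file is
 * created in the connector's debugfs directory, e.g.
 * /sys/kernel/debug/dri/0/DP-1 on a typical system:
 *
 *	$ cat i915_dpcd
 *	0000: 12 14 c4 81 01 01 01 81 02 02 06 00 00 00
 *	0600: 01
 *
 * Each line is "%04x: %*ph": the DPCD offset followed by the bytes
 * actually read back over AUX.
 */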

static int i915_panel_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	seq_printf(m, "Panel power up delay: %d\n",
		   intel_dp->panel_power_up_delay);
	seq_printf(m, "Panel power down delay: %d\n",
		   intel_dp->panel_power_down_delay);
	seq_printf(m, "Backlight on delay: %d\n",
		   intel_dp->backlight_on_delay);
	seq_printf(m, "Backlight off delay: %d\n",
		   intel_dp->backlight_off_delay);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_panel);

static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_connector *intel_connector = to_intel_connector(connector);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	/* HDCP is supported by the connector only when a shim is registered */
	if (!intel_connector->hdcp.shim)
		return -EINVAL;

	seq_printf(m, "%s:%d HDCP version: ", connector->name,
		   connector->base.id);
	seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
		   "None" : "HDCP1.4");
	seq_puts(m, "\n");

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
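
/*
 * Illustrative output only (connector name and object id vary by
 * system):
 *
 *	$ cat /sys/kernel/debug/dri/0/HDMI-A-1/i915_hdcp_sink_capability
 *	HDMI-A-1:72 HDCP version: HDCP1.4
 *
 * "None" is printed instead when the sink does not report HDCP
 * capability.
 */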

static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret = 0;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		try_again = false;
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			ret = -EINTR;
			break;
		}
		crtc = connector->state->crtc;
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   yesno(crtc_state->dsc_params.compression_enable));
		seq_printf(m, "DSC_Sink_Support: %s\n",
			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

static ssize_t i915_dsc_fec_support_write(struct file *file,
					  const char __user *ubuf,
					  size_t len, loff_t *offp)
{
	bool dsc_enable = false;
	int ret;
	struct drm_connector *connector =
		((struct seq_file *)file->private_data)->private;
	struct intel_encoder *encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	if (len == 0)
		return 0;

	DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
			 len);

	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
	if (ret < 0)
		return ret;

	DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
			 (dsc_enable) ? "true" : "false");
	intel_dp->force_dsc_en = dsc_enable;

	*offp += len;
	return len;
}

static int i915_dsc_fec_support_open(struct inode *inode,
				     struct file *file)
{
	return single_open(file, i915_dsc_fec_support_show,
			   inode->i_private);
}

static const struct file_operations i915_dsc_fec_support_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_fec_support_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_fec_support_write
};
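
/*
 * Illustrative usage only (example values are made up): writing a
 * truthy value merely latches force_dsc_en; DSC itself is only enabled
 * by a subsequent modeset. Reading back reports the current pipe and
 * sink state:
 *
 *	$ echo 1 > /sys/kernel/debug/dri/0/DP-1/i915_dsc_fec_support
 *	$ cat /sys/kernel/debug/dri/0/DP-1/i915_dsc_fec_support
 *	DSC_Enabled: no
 *	DSC_Sink_Support: yes
 *	FEC_Sink_Support: yes
 */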

/**
 * i915_debugfs_connector_add - add i915 specific connector debugfs files
 * @connector: pointer to a registered drm_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 *
 * Returns 0 on success, negative error codes on error.
 */
int i915_debugfs_connector_add(struct drm_connector *connector)
{
	struct dentry *root = connector->debugfs_entry;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);

	/* The connector must have been registered beforehand. */
	if (!root)
		return -ENODEV;

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_dpcd", S_IRUGO, root,
				    connector, &i915_dpcd_fops);

	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
				    connector, &i915_panel_fops);
		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
				    connector, &i915_psr_sink_status_fops);
	}

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
				    connector, &i915_hdcp_sink_capability_fops);
	}

	if (INTEL_GEN(dev_priv) >= 10 &&
	    (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
				    connector, &i915_dsc_fec_support_fops);

	return 0;
}