blob: c48733a15e637a05b635909cce90c419731e9f2c [file] [log] [blame]
Ben Gamari20172632009-02-17 20:08:50 -05001/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 *
27 */
28
Chris Wilsonf3cd4742009-10-13 22:20:20 +010029#include <linux/debugfs.h>
Chris Wilsone637d2c2017-03-16 13:19:57 +000030#include <linux/sort.h>
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +010031#include <linux/sched/mm.h>
Simon Farnsworth4e5359c2010-09-01 17:47:52 +010032#include "intel_drv.h"
Sagar Arun Kamblea2695742017-11-16 19:02:41 +053033#include "intel_guc_submission.h"
Ben Gamari20172632009-02-17 20:08:50 -050034
Chris Wilson9f588922019-01-16 15:33:04 +000035#include "i915_reset.h"
36
David Weinehall36cdd012016-08-22 13:59:31 +030037static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
38{
39 return to_i915(node->minor->dev);
40}
41
Chris Wilson70d39fe2010-08-25 16:03:34 +010042static int i915_capabilities(struct seq_file *m, void *data)
43{
David Weinehall36cdd012016-08-22 13:59:31 +030044 struct drm_i915_private *dev_priv = node_to_i915(m->private);
45 const struct intel_device_info *info = INTEL_INFO(dev_priv);
Michal Wajdeczkoa8c9b842017-12-19 11:43:44 +000046 struct drm_printer p = drm_seq_file_printer(m);
Chris Wilson70d39fe2010-08-25 16:03:34 +010047
David Weinehall36cdd012016-08-22 13:59:31 +030048 seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
Jani Nikula2e0d26f2016-12-01 14:49:55 +020049 seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
David Weinehall36cdd012016-08-22 13:59:31 +030050 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
Chris Wilson418e3cd2017-02-06 21:36:08 +000051
Michal Wajdeczkoa8c9b842017-12-19 11:43:44 +000052 intel_device_info_dump_flags(info, &p);
Jani Nikula02584042018-12-31 16:56:41 +020053 intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
Chris Wilson3fed1802018-02-07 21:05:43 +000054 intel_driver_caps_print(&dev_priv->caps, &p);
Chris Wilson70d39fe2010-08-25 16:03:34 +010055
Chris Wilson418e3cd2017-02-06 21:36:08 +000056 kernel_param_lock(THIS_MODULE);
Michal Wajdeczkoacfb9972017-12-19 11:43:46 +000057 i915_params_dump(&i915_modparams, &p);
Chris Wilson418e3cd2017-02-06 21:36:08 +000058 kernel_param_unlock(THIS_MODULE);
59
Chris Wilson70d39fe2010-08-25 16:03:34 +010060 return 0;
61}
Ben Gamari433e12f2009-02-17 20:08:51 -050062
Imre Deaka7363de2016-05-12 16:18:52 +030063static char get_active_flag(struct drm_i915_gem_object *obj)
Chris Wilsona6172a82009-02-11 14:26:38 +000064{
Chris Wilson573adb32016-08-04 16:32:39 +010065 return i915_gem_object_is_active(obj) ? '*' : ' ';
Chris Wilsona6172a82009-02-11 14:26:38 +000066}
67
Imre Deaka7363de2016-05-12 16:18:52 +030068static char get_pin_flag(struct drm_i915_gem_object *obj)
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010069{
Chris Wilsonbd3d2252017-10-13 21:26:14 +010070 return obj->pin_global ? 'p' : ' ';
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010071}
72
Imre Deaka7363de2016-05-12 16:18:52 +030073static char get_tiling_flag(struct drm_i915_gem_object *obj)
Chris Wilsona6172a82009-02-11 14:26:38 +000074{
Chris Wilson3e510a82016-08-05 10:14:23 +010075 switch (i915_gem_object_get_tiling(obj)) {
Akshay Joshi0206e352011-08-16 15:34:10 -040076 default:
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010077 case I915_TILING_NONE: return ' ';
78 case I915_TILING_X: return 'X';
79 case I915_TILING_Y: return 'Y';
Akshay Joshi0206e352011-08-16 15:34:10 -040080 }
Chris Wilsona6172a82009-02-11 14:26:38 +000081}
82
Imre Deaka7363de2016-05-12 16:18:52 +030083static char get_global_flag(struct drm_i915_gem_object *obj)
Ben Widawsky1d693bc2013-07-31 17:00:00 -070084{
Chris Wilsona65adaf2017-10-09 09:43:57 +010085 return obj->userfault_count ? 'g' : ' ';
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010086}
87
Imre Deaka7363de2016-05-12 16:18:52 +030088static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010089{
Chris Wilsona4f5ea62016-10-28 13:58:35 +010090 return obj->mm.mapping ? 'M' : ' ';
Ben Widawsky1d693bc2013-07-31 17:00:00 -070091}
92
Tvrtko Ursulinca1543b2015-07-01 11:51:10 +010093static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
94{
95 u64 size = 0;
96 struct i915_vma *vma;
97
Chris Wilsone2189dd2017-12-07 21:14:07 +000098 for_each_ggtt_vma(vma, obj) {
99 if (drm_mm_node_allocated(&vma->node))
Tvrtko Ursulinca1543b2015-07-01 11:51:10 +0100100 size += vma->node.size;
101 }
102
103 return size;
104}
105
Matthew Auld7393b7e2017-10-06 23:18:28 +0100106static const char *
107stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
108{
109 size_t x = 0;
110
111 switch (page_sizes) {
112 case 0:
113 return "";
114 case I915_GTT_PAGE_SIZE_4K:
115 return "4K";
116 case I915_GTT_PAGE_SIZE_64K:
117 return "64K";
118 case I915_GTT_PAGE_SIZE_2M:
119 return "2M";
120 default:
121 if (!buf)
122 return "M";
123
124 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
125 x += snprintf(buf + x, len - x, "2M, ");
126 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
127 x += snprintf(buf + x, len - x, "64K, ");
128 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
129 x += snprintf(buf + x, len - x, "4K, ");
130 buf[x-2] = '\0';
131
132 return buf;
133 }
134}
135
Chris Wilson37811fc2010-08-25 22:45:57 +0100136static void
137describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
138{
Chris Wilsonb4716182015-04-27 13:41:17 +0100139 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000140 struct intel_engine_cs *engine;
Ben Widawsky1d693bc2013-07-31 17:00:00 -0700141 struct i915_vma *vma;
Chris Wilsonfaf5bf02016-08-04 16:32:37 +0100142 unsigned int frontbuffer_bits;
Ben Widawskyd7f46fc2013-12-06 14:10:55 -0800143 int pin_count = 0;
144
Chris Wilson188c1ab2016-04-03 14:14:20 +0100145 lockdep_assert_held(&obj->base.dev->struct_mutex);
146
Chris Wilsond07f0e52016-10-28 13:58:44 +0100147 seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
Chris Wilson37811fc2010-08-25 22:45:57 +0100148 &obj->base,
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +0100149 get_active_flag(obj),
Chris Wilson37811fc2010-08-25 22:45:57 +0100150 get_pin_flag(obj),
151 get_tiling_flag(obj),
Ben Widawsky1d693bc2013-07-31 17:00:00 -0700152 get_global_flag(obj),
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +0100153 get_pin_mapped_flag(obj),
Eric Anholta05a5862011-12-20 08:54:15 -0800154 obj->base.size / 1024,
Christian Königc0a51fd2018-02-16 13:43:38 +0100155 obj->read_domains,
156 obj->write_domain,
David Weinehall36cdd012016-08-22 13:59:31 +0300157 i915_cache_level_str(dev_priv, obj->cache_level),
Chris Wilsona4f5ea62016-10-28 13:58:35 +0100158 obj->mm.dirty ? " dirty" : "",
159 obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
Chris Wilson37811fc2010-08-25 22:45:57 +0100160 if (obj->base.name)
161 seq_printf(m, " (name: %d)", obj->base.name);
Chris Wilson528cbd12019-01-28 10:23:54 +0000162 list_for_each_entry(vma, &obj->vma.list, obj_link) {
Chris Wilson20dfbde2016-08-04 16:32:30 +0100163 if (i915_vma_is_pinned(vma))
Ben Widawskyd7f46fc2013-12-06 14:10:55 -0800164 pin_count++;
Dan Carpenterba0635ff2015-02-25 16:17:48 +0300165 }
166 seq_printf(m, " (pinned x %d)", pin_count);
Chris Wilsonbd3d2252017-10-13 21:26:14 +0100167 if (obj->pin_global)
168 seq_printf(m, " (global)");
Chris Wilson528cbd12019-01-28 10:23:54 +0000169 list_for_each_entry(vma, &obj->vma.list, obj_link) {
Chris Wilson15717de2016-08-04 07:52:26 +0100170 if (!drm_mm_node_allocated(&vma->node))
171 continue;
172
Matthew Auld7393b7e2017-10-06 23:18:28 +0100173 seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
Chris Wilson3272db52016-08-04 16:32:32 +0100174 i915_vma_is_ggtt(vma) ? "g" : "pp",
Matthew Auld7393b7e2017-10-06 23:18:28 +0100175 vma->node.start, vma->node.size,
176 stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
Chris Wilson21976852017-01-12 11:21:08 +0000177 if (i915_vma_is_ggtt(vma)) {
178 switch (vma->ggtt_view.type) {
179 case I915_GGTT_VIEW_NORMAL:
180 seq_puts(m, ", normal");
181 break;
182
183 case I915_GGTT_VIEW_PARTIAL:
184 seq_printf(m, ", partial [%08llx+%x]",
Chris Wilson8bab11932017-01-14 00:28:25 +0000185 vma->ggtt_view.partial.offset << PAGE_SHIFT,
186 vma->ggtt_view.partial.size << PAGE_SHIFT);
Chris Wilson21976852017-01-12 11:21:08 +0000187 break;
188
189 case I915_GGTT_VIEW_ROTATED:
190 seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
Chris Wilson8bab11932017-01-14 00:28:25 +0000191 vma->ggtt_view.rotated.plane[0].width,
192 vma->ggtt_view.rotated.plane[0].height,
193 vma->ggtt_view.rotated.plane[0].stride,
194 vma->ggtt_view.rotated.plane[0].offset,
195 vma->ggtt_view.rotated.plane[1].width,
196 vma->ggtt_view.rotated.plane[1].height,
197 vma->ggtt_view.rotated.plane[1].stride,
198 vma->ggtt_view.rotated.plane[1].offset);
Chris Wilson21976852017-01-12 11:21:08 +0000199 break;
200
201 default:
202 MISSING_CASE(vma->ggtt_view.type);
203 break;
204 }
205 }
Chris Wilson49ef5292016-08-18 17:17:00 +0100206 if (vma->fence)
207 seq_printf(m, " , fence: %d%s",
208 vma->fence->id,
Chris Wilson21950ee2019-02-05 13:00:05 +0000209 i915_active_request_isset(&vma->last_fence) ? "*" : "");
Chris Wilson596c5922016-02-26 11:03:20 +0000210 seq_puts(m, ")");
Ben Widawsky1d693bc2013-07-31 17:00:00 -0700211 }
Chris Wilsonc1ad11f2012-11-15 11:32:21 +0000212 if (obj->stolen)
Thierry Reding440fd522015-01-23 09:05:06 +0100213 seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
Chris Wilson27c01aa2016-08-04 07:52:30 +0100214
Chris Wilsond07f0e52016-10-28 13:58:44 +0100215 engine = i915_gem_object_last_write_engine(obj);
Chris Wilson27c01aa2016-08-04 07:52:30 +0100216 if (engine)
217 seq_printf(m, " (%s)", engine->name);
218
Chris Wilsonfaf5bf02016-08-04 16:32:37 +0100219 frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
220 if (frontbuffer_bits)
221 seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
Chris Wilson37811fc2010-08-25 22:45:57 +0100222}
223
Chris Wilsone637d2c2017-03-16 13:19:57 +0000224static int obj_rank_by_stolen(const void *A, const void *B)
Chris Wilson6d2b88852013-08-07 18:30:54 +0100225{
Chris Wilsone637d2c2017-03-16 13:19:57 +0000226 const struct drm_i915_gem_object *a =
227 *(const struct drm_i915_gem_object **)A;
228 const struct drm_i915_gem_object *b =
229 *(const struct drm_i915_gem_object **)B;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100230
Rasmus Villemoes2d05fa12015-09-28 23:08:50 +0200231 if (a->stolen->start < b->stolen->start)
232 return -1;
233 if (a->stolen->start > b->stolen->start)
234 return 1;
235 return 0;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100236}
237
238static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
239{
David Weinehall36cdd012016-08-22 13:59:31 +0300240 struct drm_i915_private *dev_priv = node_to_i915(m->private);
241 struct drm_device *dev = &dev_priv->drm;
Chris Wilsone637d2c2017-03-16 13:19:57 +0000242 struct drm_i915_gem_object **objects;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100243 struct drm_i915_gem_object *obj;
Mika Kuoppalac44ef602015-06-25 18:35:05 +0300244 u64 total_obj_size, total_gtt_size;
Chris Wilsone637d2c2017-03-16 13:19:57 +0000245 unsigned long total, count, n;
246 int ret;
247
248 total = READ_ONCE(dev_priv->mm.object_count);
Michal Hocko20981052017-05-17 14:23:12 +0200249 objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
Chris Wilsone637d2c2017-03-16 13:19:57 +0000250 if (!objects)
251 return -ENOMEM;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100252
253 ret = mutex_lock_interruptible(&dev->struct_mutex);
254 if (ret)
Chris Wilsone637d2c2017-03-16 13:19:57 +0000255 goto out;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100256
257 total_obj_size = total_gtt_size = count = 0;
Chris Wilsonf2123812017-10-16 12:40:37 +0100258
259 spin_lock(&dev_priv->mm.obj_lock);
260 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
Chris Wilsone637d2c2017-03-16 13:19:57 +0000261 if (count == total)
262 break;
263
Chris Wilson6d2b88852013-08-07 18:30:54 +0100264 if (obj->stolen == NULL)
265 continue;
266
Chris Wilsone637d2c2017-03-16 13:19:57 +0000267 objects[count++] = obj;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100268 total_obj_size += obj->base.size;
Tvrtko Ursulinca1543b2015-07-01 11:51:10 +0100269 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
Chris Wilsone637d2c2017-03-16 13:19:57 +0000270
Chris Wilson6d2b88852013-08-07 18:30:54 +0100271 }
Chris Wilsonf2123812017-10-16 12:40:37 +0100272 list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
Chris Wilsone637d2c2017-03-16 13:19:57 +0000273 if (count == total)
274 break;
275
Chris Wilson6d2b88852013-08-07 18:30:54 +0100276 if (obj->stolen == NULL)
277 continue;
278
Chris Wilsone637d2c2017-03-16 13:19:57 +0000279 objects[count++] = obj;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100280 total_obj_size += obj->base.size;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100281 }
Chris Wilsonf2123812017-10-16 12:40:37 +0100282 spin_unlock(&dev_priv->mm.obj_lock);
Chris Wilson6d2b88852013-08-07 18:30:54 +0100283
Chris Wilsone637d2c2017-03-16 13:19:57 +0000284 sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);
285
286 seq_puts(m, "Stolen:\n");
287 for (n = 0; n < count; n++) {
288 seq_puts(m, " ");
289 describe_obj(m, objects[n]);
290 seq_putc(m, '\n');
291 }
292 seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
Chris Wilson6d2b88852013-08-07 18:30:54 +0100293 count, total_obj_size, total_gtt_size);
Chris Wilsone637d2c2017-03-16 13:19:57 +0000294
295 mutex_unlock(&dev->struct_mutex);
296out:
Michal Hocko20981052017-05-17 14:23:12 +0200297 kvfree(objects);
Chris Wilsone637d2c2017-03-16 13:19:57 +0000298 return ret;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100299}
300
Chris Wilson2db8e9d2013-06-04 23:49:08 +0100301struct file_stats {
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000302 struct i915_address_space *vm;
Mika Kuoppalac44ef602015-06-25 18:35:05 +0300303 unsigned long count;
304 u64 total, unbound;
305 u64 global, shared;
306 u64 active, inactive;
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000307 u64 closed;
Chris Wilson2db8e9d2013-06-04 23:49:08 +0100308};
309
310static int per_file_stats(int id, void *ptr, void *data)
311{
312 struct drm_i915_gem_object *obj = ptr;
313 struct file_stats *stats = data;
Chris Wilson6313c202014-03-19 13:45:45 +0000314 struct i915_vma *vma;
Chris Wilson2db8e9d2013-06-04 23:49:08 +0100315
Chris Wilson0caf81b2017-06-17 12:57:44 +0100316 lockdep_assert_held(&obj->base.dev->struct_mutex);
317
Chris Wilson2db8e9d2013-06-04 23:49:08 +0100318 stats->count++;
319 stats->total += obj->base.size;
Chris Wilson15717de2016-08-04 07:52:26 +0100320 if (!obj->bind_count)
321 stats->unbound += obj->base.size;
Chris Wilsonc67a17e2014-03-19 13:45:46 +0000322 if (obj->base.name || obj->base.dma_buf)
323 stats->shared += obj->base.size;
324
Chris Wilson528cbd12019-01-28 10:23:54 +0000325 list_for_each_entry(vma, &obj->vma.list, obj_link) {
Chris Wilson894eeec2016-08-04 07:52:20 +0100326 if (!drm_mm_node_allocated(&vma->node))
327 continue;
Chris Wilson6313c202014-03-19 13:45:45 +0000328
Chris Wilson3272db52016-08-04 16:32:32 +0100329 if (i915_vma_is_ggtt(vma)) {
Chris Wilson894eeec2016-08-04 07:52:20 +0100330 stats->global += vma->node.size;
331 } else {
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000332 if (vma->vm != stats->vm)
Chris Wilson6313c202014-03-19 13:45:45 +0000333 continue;
Chris Wilson6313c202014-03-19 13:45:45 +0000334 }
Chris Wilson894eeec2016-08-04 07:52:20 +0100335
Chris Wilsonb0decaf2016-08-04 07:52:44 +0100336 if (i915_vma_is_active(vma))
Chris Wilson894eeec2016-08-04 07:52:20 +0100337 stats->active += vma->node.size;
338 else
339 stats->inactive += vma->node.size;
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000340
341 if (i915_vma_is_closed(vma))
342 stats->closed += vma->node.size;
Chris Wilson2db8e9d2013-06-04 23:49:08 +0100343 }
344
345 return 0;
346}
347
Chris Wilsonb0da1b72015-04-07 16:20:40 +0100348#define print_file_stats(m, name, stats) do { \
349 if (stats.count) \
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000350 seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
Chris Wilsonb0da1b72015-04-07 16:20:40 +0100351 name, \
352 stats.count, \
353 stats.total, \
354 stats.active, \
355 stats.inactive, \
356 stats.global, \
357 stats.shared, \
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000358 stats.unbound, \
359 stats.closed); \
Chris Wilsonb0da1b72015-04-07 16:20:40 +0100360} while (0)
Brad Volkin493018d2014-12-11 12:13:08 -0800361
362static void print_batch_pool_stats(struct seq_file *m,
363 struct drm_i915_private *dev_priv)
364{
365 struct drm_i915_gem_object *obj;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000366 struct intel_engine_cs *engine;
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000367 struct file_stats stats = {};
Akash Goel3b3f1652016-10-13 22:44:48 +0530368 enum intel_engine_id id;
Dave Gordonb4ac5af2016-03-24 11:20:38 +0000369 int j;
Brad Volkin493018d2014-12-11 12:13:08 -0800370
Akash Goel3b3f1652016-10-13 22:44:48 +0530371 for_each_engine(engine, dev_priv, id) {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000372 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
Chris Wilson8d9d5742015-04-07 16:20:38 +0100373 list_for_each_entry(obj,
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000374 &engine->batch_pool.cache_list[j],
Chris Wilson8d9d5742015-04-07 16:20:38 +0100375 batch_pool_link)
376 per_file_stats(0, obj, &stats);
377 }
Chris Wilson06fbca72015-04-07 16:20:36 +0100378 }
Brad Volkin493018d2014-12-11 12:13:08 -0800379
Chris Wilsonb0da1b72015-04-07 16:20:40 +0100380 print_file_stats(m, "[k]batch pool", stats);
Brad Volkin493018d2014-12-11 12:13:08 -0800381}
382
Chris Wilson15da9562016-05-24 14:53:43 +0100383static void print_context_stats(struct seq_file *m,
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000384 struct drm_i915_private *i915)
Chris Wilson15da9562016-05-24 14:53:43 +0100385{
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000386 struct file_stats kstats = {};
387 struct i915_gem_context *ctx;
Chris Wilson15da9562016-05-24 14:53:43 +0100388
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000389 list_for_each_entry(ctx, &i915->contexts.list, link) {
390 struct intel_engine_cs *engine;
391 enum intel_engine_id id;
Chris Wilson15da9562016-05-24 14:53:43 +0100392
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000393 for_each_engine(engine, i915, id) {
394 struct intel_context *ce = to_intel_context(ctx, engine);
Chris Wilson15da9562016-05-24 14:53:43 +0100395
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000396 if (ce->state)
397 per_file_stats(0, ce->state->obj, &kstats);
398 if (ce->ring)
399 per_file_stats(0, ce->ring->vma->obj, &kstats);
400 }
401
402 if (!IS_ERR_OR_NULL(ctx->file_priv)) {
403 struct file_stats stats = { .vm = &ctx->ppgtt->vm, };
404 struct drm_file *file = ctx->file_priv->file;
405 struct task_struct *task;
406 char name[80];
407
408 spin_lock(&file->table_lock);
409 idr_for_each(&file->object_idr, per_file_stats, &stats);
410 spin_unlock(&file->table_lock);
411
412 rcu_read_lock();
413 task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
414 snprintf(name, sizeof(name), "%s/%d",
415 task ? task->comm : "<unknown>",
416 ctx->user_handle);
417 rcu_read_unlock();
418
419 print_file_stats(m, name, stats);
420 }
Chris Wilson15da9562016-05-24 14:53:43 +0100421 }
Chris Wilson15da9562016-05-24 14:53:43 +0100422
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000423 print_file_stats(m, "[k]contexts", kstats);
Chris Wilson15da9562016-05-24 14:53:43 +0100424}
425
David Weinehall36cdd012016-08-22 13:59:31 +0300426static int i915_gem_object_info(struct seq_file *m, void *data)
Chris Wilson73aa8082010-09-30 11:46:12 +0100427{
David Weinehall36cdd012016-08-22 13:59:31 +0300428 struct drm_i915_private *dev_priv = node_to_i915(m->private);
429 struct drm_device *dev = &dev_priv->drm;
Joonas Lahtinen72e96d62016-03-30 16:57:10 +0300430 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Matthew Auld7393b7e2017-10-06 23:18:28 +0100431 u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
432 u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
Chris Wilson6299f992010-11-24 12:23:44 +0000433 struct drm_i915_gem_object *obj;
Matthew Auld7393b7e2017-10-06 23:18:28 +0100434 unsigned int page_sizes = 0;
Matthew Auld7393b7e2017-10-06 23:18:28 +0100435 char buf[80];
Chris Wilson73aa8082010-09-30 11:46:12 +0100436 int ret;
437
Chris Wilson3ef7f222016-10-18 13:02:48 +0100438 seq_printf(m, "%u objects, %llu bytes\n",
Chris Wilson6299f992010-11-24 12:23:44 +0000439 dev_priv->mm.object_count,
440 dev_priv->mm.object_memory);
441
Chris Wilson1544c422016-08-15 13:18:16 +0100442 size = count = 0;
443 mapped_size = mapped_count = 0;
444 purgeable_size = purgeable_count = 0;
Matthew Auld7393b7e2017-10-06 23:18:28 +0100445 huge_size = huge_count = 0;
Chris Wilsonf2123812017-10-16 12:40:37 +0100446
447 spin_lock(&dev_priv->mm.obj_lock);
448 list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
Chris Wilson2bd160a2016-08-15 10:48:45 +0100449 size += obj->base.size;
450 ++count;
Chris Wilson6c085a72012-08-20 11:40:46 +0200451
Chris Wilsona4f5ea62016-10-28 13:58:35 +0100452 if (obj->mm.madv == I915_MADV_DONTNEED) {
Chris Wilsonb7abb712012-08-20 11:33:30 +0200453 purgeable_size += obj->base.size;
454 ++purgeable_count;
455 }
Chris Wilson2bd160a2016-08-15 10:48:45 +0100456
Chris Wilsona4f5ea62016-10-28 13:58:35 +0100457 if (obj->mm.mapping) {
Chris Wilson2bd160a2016-08-15 10:48:45 +0100458 mapped_count++;
459 mapped_size += obj->base.size;
Tvrtko Ursulinbe19b102016-04-15 11:34:53 +0100460 }
Matthew Auld7393b7e2017-10-06 23:18:28 +0100461
462 if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
463 huge_count++;
464 huge_size += obj->base.size;
465 page_sizes |= obj->mm.page_sizes.sg;
466 }
Chris Wilson6299f992010-11-24 12:23:44 +0000467 }
Chris Wilson2bd160a2016-08-15 10:48:45 +0100468 seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
469
470 size = count = dpy_size = dpy_count = 0;
Chris Wilsonf2123812017-10-16 12:40:37 +0100471 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
Chris Wilson2bd160a2016-08-15 10:48:45 +0100472 size += obj->base.size;
473 ++count;
474
Chris Wilsonbd3d2252017-10-13 21:26:14 +0100475 if (obj->pin_global) {
Chris Wilson2bd160a2016-08-15 10:48:45 +0100476 dpy_size += obj->base.size;
477 ++dpy_count;
478 }
479
Chris Wilsona4f5ea62016-10-28 13:58:35 +0100480 if (obj->mm.madv == I915_MADV_DONTNEED) {
Chris Wilson2bd160a2016-08-15 10:48:45 +0100481 purgeable_size += obj->base.size;
482 ++purgeable_count;
483 }
484
Chris Wilsona4f5ea62016-10-28 13:58:35 +0100485 if (obj->mm.mapping) {
Chris Wilson2bd160a2016-08-15 10:48:45 +0100486 mapped_count++;
487 mapped_size += obj->base.size;
488 }
Matthew Auld7393b7e2017-10-06 23:18:28 +0100489
490 if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
491 huge_count++;
492 huge_size += obj->base.size;
493 page_sizes |= obj->mm.page_sizes.sg;
494 }
Chris Wilson2bd160a2016-08-15 10:48:45 +0100495 }
Chris Wilsonf2123812017-10-16 12:40:37 +0100496 spin_unlock(&dev_priv->mm.obj_lock);
497
Chris Wilson2bd160a2016-08-15 10:48:45 +0100498 seq_printf(m, "%u bound objects, %llu bytes\n",
499 count, size);
Mika Kuoppalac44ef602015-06-25 18:35:05 +0300500 seq_printf(m, "%u purgeable objects, %llu bytes\n",
Chris Wilsonb7abb712012-08-20 11:33:30 +0200501 purgeable_count, purgeable_size);
Chris Wilson2bd160a2016-08-15 10:48:45 +0100502 seq_printf(m, "%u mapped objects, %llu bytes\n",
503 mapped_count, mapped_size);
Matthew Auld7393b7e2017-10-06 23:18:28 +0100504 seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
505 huge_count,
506 stringify_page_sizes(page_sizes, buf, sizeof(buf)),
507 huge_size);
Chris Wilsonbd3d2252017-10-13 21:26:14 +0100508 seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
Chris Wilson2bd160a2016-08-15 10:48:45 +0100509 dpy_count, dpy_size);
Chris Wilson6299f992010-11-24 12:23:44 +0000510
Matthew Auldb7128ef2017-12-11 15:18:22 +0000511 seq_printf(m, "%llu [%pa] gtt total\n",
Chris Wilson82ad6442018-06-05 16:37:58 +0100512 ggtt->vm.total, &ggtt->mappable_end);
Matthew Auld7393b7e2017-10-06 23:18:28 +0100513 seq_printf(m, "Supported page sizes: %s\n",
514 stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
515 buf, sizeof(buf)));
Chris Wilson73aa8082010-09-30 11:46:12 +0100516
Damien Lespiau267f0c92013-06-24 22:59:48 +0100517 seq_putc(m, '\n');
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000518
519 ret = mutex_lock_interruptible(&dev->struct_mutex);
520 if (ret)
521 return ret;
522
Brad Volkin493018d2014-12-11 12:13:08 -0800523 print_batch_pool_stats(m, dev_priv);
Chris Wilson15da9562016-05-24 14:53:43 +0100524 print_context_stats(m, dev_priv);
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000525 mutex_unlock(&dev->struct_mutex);
Chris Wilson73aa8082010-09-30 11:46:12 +0100526
527 return 0;
528}
529
Damien Lespiauaee56cf2013-06-24 22:59:49 +0100530static int i915_gem_gtt_info(struct seq_file *m, void *data)
Chris Wilson08c18322011-01-10 00:00:24 +0000531{
Damien Lespiau9f25d002014-05-13 15:30:28 +0100532 struct drm_info_node *node = m->private;
David Weinehall36cdd012016-08-22 13:59:31 +0300533 struct drm_i915_private *dev_priv = node_to_i915(node);
534 struct drm_device *dev = &dev_priv->drm;
Chris Wilsonf2123812017-10-16 12:40:37 +0100535 struct drm_i915_gem_object **objects;
Chris Wilson08c18322011-01-10 00:00:24 +0000536 struct drm_i915_gem_object *obj;
Mika Kuoppalac44ef602015-06-25 18:35:05 +0300537 u64 total_obj_size, total_gtt_size;
Chris Wilsonf2123812017-10-16 12:40:37 +0100538 unsigned long nobject, n;
Chris Wilson08c18322011-01-10 00:00:24 +0000539 int count, ret;
540
Chris Wilsonf2123812017-10-16 12:40:37 +0100541 nobject = READ_ONCE(dev_priv->mm.object_count);
542 objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
543 if (!objects)
544 return -ENOMEM;
545
Chris Wilson08c18322011-01-10 00:00:24 +0000546 ret = mutex_lock_interruptible(&dev->struct_mutex);
547 if (ret)
548 return ret;
549
Chris Wilsonf2123812017-10-16 12:40:37 +0100550 count = 0;
551 spin_lock(&dev_priv->mm.obj_lock);
552 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
553 objects[count++] = obj;
554 if (count == nobject)
555 break;
556 }
557 spin_unlock(&dev_priv->mm.obj_lock);
558
559 total_obj_size = total_gtt_size = 0;
560 for (n = 0; n < count; n++) {
561 obj = objects[n];
562
Damien Lespiau267f0c92013-06-24 22:59:48 +0100563 seq_puts(m, " ");
Chris Wilson08c18322011-01-10 00:00:24 +0000564 describe_obj(m, obj);
Damien Lespiau267f0c92013-06-24 22:59:48 +0100565 seq_putc(m, '\n');
Chris Wilson08c18322011-01-10 00:00:24 +0000566 total_obj_size += obj->base.size;
Tvrtko Ursulinca1543b2015-07-01 11:51:10 +0100567 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
Chris Wilson08c18322011-01-10 00:00:24 +0000568 }
569
570 mutex_unlock(&dev->struct_mutex);
571
Mika Kuoppalac44ef602015-06-25 18:35:05 +0300572 seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
Chris Wilson08c18322011-01-10 00:00:24 +0000573 count, total_obj_size, total_gtt_size);
Chris Wilsonf2123812017-10-16 12:40:37 +0100574 kvfree(objects);
Chris Wilson08c18322011-01-10 00:00:24 +0000575
576 return 0;
577}
578
Brad Volkin493018d2014-12-11 12:13:08 -0800579static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
580{
David Weinehall36cdd012016-08-22 13:59:31 +0300581 struct drm_i915_private *dev_priv = node_to_i915(m->private);
582 struct drm_device *dev = &dev_priv->drm;
Brad Volkin493018d2014-12-11 12:13:08 -0800583 struct drm_i915_gem_object *obj;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000584 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +0530585 enum intel_engine_id id;
Chris Wilson8d9d5742015-04-07 16:20:38 +0100586 int total = 0;
Dave Gordonb4ac5af2016-03-24 11:20:38 +0000587 int ret, j;
Brad Volkin493018d2014-12-11 12:13:08 -0800588
589 ret = mutex_lock_interruptible(&dev->struct_mutex);
590 if (ret)
591 return ret;
592
Akash Goel3b3f1652016-10-13 22:44:48 +0530593 for_each_engine(engine, dev_priv, id) {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000594 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
Chris Wilson8d9d5742015-04-07 16:20:38 +0100595 int count;
596
597 count = 0;
598 list_for_each_entry(obj,
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000599 &engine->batch_pool.cache_list[j],
Chris Wilson8d9d5742015-04-07 16:20:38 +0100600 batch_pool_link)
601 count++;
602 seq_printf(m, "%s cache[%d]: %d objects\n",
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000603 engine->name, j, count);
Chris Wilson8d9d5742015-04-07 16:20:38 +0100604
605 list_for_each_entry(obj,
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000606 &engine->batch_pool.cache_list[j],
Chris Wilson8d9d5742015-04-07 16:20:38 +0100607 batch_pool_link) {
608 seq_puts(m, " ");
609 describe_obj(m, obj);
610 seq_putc(m, '\n');
611 }
612
613 total += count;
Chris Wilson06fbca72015-04-07 16:20:36 +0100614 }
Brad Volkin493018d2014-12-11 12:13:08 -0800615 }
616
Chris Wilson8d9d5742015-04-07 16:20:38 +0100617 seq_printf(m, "total: %d\n", total);
Brad Volkin493018d2014-12-11 12:13:08 -0800618
619 mutex_unlock(&dev->struct_mutex);
620
621 return 0;
622}
623
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200624static void gen8_display_interrupt_info(struct seq_file *m)
625{
626 struct drm_i915_private *dev_priv = node_to_i915(m->private);
627 int pipe;
628
629 for_each_pipe(dev_priv, pipe) {
630 enum intel_display_power_domain power_domain;
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000631 intel_wakeref_t wakeref;
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200632
633 power_domain = POWER_DOMAIN_PIPE(pipe);
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000634 wakeref = intel_display_power_get_if_enabled(dev_priv,
635 power_domain);
636 if (!wakeref) {
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200637 seq_printf(m, "Pipe %c power disabled\n",
638 pipe_name(pipe));
639 continue;
640 }
641 seq_printf(m, "Pipe %c IMR:\t%08x\n",
642 pipe_name(pipe),
643 I915_READ(GEN8_DE_PIPE_IMR(pipe)));
644 seq_printf(m, "Pipe %c IIR:\t%08x\n",
645 pipe_name(pipe),
646 I915_READ(GEN8_DE_PIPE_IIR(pipe)));
647 seq_printf(m, "Pipe %c IER:\t%08x\n",
648 pipe_name(pipe),
649 I915_READ(GEN8_DE_PIPE_IER(pipe)));
650
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000651 intel_display_power_put(dev_priv, power_domain, wakeref);
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200652 }
653
654 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
655 I915_READ(GEN8_DE_PORT_IMR));
656 seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
657 I915_READ(GEN8_DE_PORT_IIR));
658 seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
659 I915_READ(GEN8_DE_PORT_IER));
660
661 seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
662 I915_READ(GEN8_DE_MISC_IMR));
663 seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
664 I915_READ(GEN8_DE_MISC_IIR));
665 seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
666 I915_READ(GEN8_DE_MISC_IER));
667
668 seq_printf(m, "PCU interrupt mask:\t%08x\n",
669 I915_READ(GEN8_PCU_IMR));
670 seq_printf(m, "PCU interrupt identity:\t%08x\n",
671 I915_READ(GEN8_PCU_IIR));
672 seq_printf(m, "PCU interrupt enable:\t%08x\n",
673 I915_READ(GEN8_PCU_IER));
674}
675
Ben Gamari20172632009-02-17 20:08:50 -0500676static int i915_interrupt_info(struct seq_file *m, void *data)
677{
David Weinehall36cdd012016-08-22 13:59:31 +0300678 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000679 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +0530680 enum intel_engine_id id;
Chris Wilsona0371212019-01-14 14:21:14 +0000681 intel_wakeref_t wakeref;
Chris Wilson4bb05042016-09-03 07:53:43 +0100682 int i, pipe;
Chris Wilsonde227ef2010-07-03 07:58:38 +0100683
Chris Wilsona0371212019-01-14 14:21:14 +0000684 wakeref = intel_runtime_pm_get(dev_priv);
Ben Gamari20172632009-02-17 20:08:50 -0500685
David Weinehall36cdd012016-08-22 13:59:31 +0300686 if (IS_CHERRYVIEW(dev_priv)) {
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000687 intel_wakeref_t pref;
688
Ville Syrjälä74e1ca82014-04-09 13:28:09 +0300689 seq_printf(m, "Master Interrupt Control:\t%08x\n",
690 I915_READ(GEN8_MASTER_IRQ));
691
692 seq_printf(m, "Display IER:\t%08x\n",
693 I915_READ(VLV_IER));
694 seq_printf(m, "Display IIR:\t%08x\n",
695 I915_READ(VLV_IIR));
696 seq_printf(m, "Display IIR_RW:\t%08x\n",
697 I915_READ(VLV_IIR_RW));
698 seq_printf(m, "Display IMR:\t%08x\n",
699 I915_READ(VLV_IMR));
Chris Wilson9c870d02016-10-24 13:42:15 +0100700 for_each_pipe(dev_priv, pipe) {
701 enum intel_display_power_domain power_domain;
702
703 power_domain = POWER_DOMAIN_PIPE(pipe);
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000704 pref = intel_display_power_get_if_enabled(dev_priv,
705 power_domain);
706 if (!pref) {
Chris Wilson9c870d02016-10-24 13:42:15 +0100707 seq_printf(m, "Pipe %c power disabled\n",
708 pipe_name(pipe));
709 continue;
710 }
711
Ville Syrjälä74e1ca82014-04-09 13:28:09 +0300712 seq_printf(m, "Pipe %c stat:\t%08x\n",
713 pipe_name(pipe),
714 I915_READ(PIPESTAT(pipe)));
715
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000716 intel_display_power_put(dev_priv, power_domain, pref);
Chris Wilson9c870d02016-10-24 13:42:15 +0100717 }
718
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000719 pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
Ville Syrjälä74e1ca82014-04-09 13:28:09 +0300720 seq_printf(m, "Port hotplug:\t%08x\n",
721 I915_READ(PORT_HOTPLUG_EN));
722 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
723 I915_READ(VLV_DPFLIPSTAT));
724 seq_printf(m, "DPINVGTT:\t%08x\n",
725 I915_READ(DPINVGTT));
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000726 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
Ville Syrjälä74e1ca82014-04-09 13:28:09 +0300727
728 for (i = 0; i < 4; i++) {
729 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
730 i, I915_READ(GEN8_GT_IMR(i)));
731 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
732 i, I915_READ(GEN8_GT_IIR(i)));
733 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
734 i, I915_READ(GEN8_GT_IER(i)));
735 }
736
737 seq_printf(m, "PCU interrupt mask:\t%08x\n",
738 I915_READ(GEN8_PCU_IMR));
739 seq_printf(m, "PCU interrupt identity:\t%08x\n",
740 I915_READ(GEN8_PCU_IIR));
741 seq_printf(m, "PCU interrupt enable:\t%08x\n",
742 I915_READ(GEN8_PCU_IER));
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200743 } else if (INTEL_GEN(dev_priv) >= 11) {
744 seq_printf(m, "Master Interrupt Control: %08x\n",
745 I915_READ(GEN11_GFX_MSTR_IRQ));
746
747 seq_printf(m, "Render/Copy Intr Enable: %08x\n",
748 I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
749 seq_printf(m, "VCS/VECS Intr Enable: %08x\n",
750 I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
751 seq_printf(m, "GUC/SG Intr Enable:\t %08x\n",
752 I915_READ(GEN11_GUC_SG_INTR_ENABLE));
753 seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
754 I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
755 seq_printf(m, "Crypto Intr Enable:\t %08x\n",
756 I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
757 seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n",
758 I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
759
760 seq_printf(m, "Display Interrupt Control:\t%08x\n",
761 I915_READ(GEN11_DISPLAY_INT_CTL));
762
763 gen8_display_interrupt_info(m);
David Weinehall36cdd012016-08-22 13:59:31 +0300764 } else if (INTEL_GEN(dev_priv) >= 8) {
Ben Widawskya123f152013-11-02 21:07:10 -0700765 seq_printf(m, "Master Interrupt Control:\t%08x\n",
766 I915_READ(GEN8_MASTER_IRQ));
767
768 for (i = 0; i < 4; i++) {
769 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
770 i, I915_READ(GEN8_GT_IMR(i)));
771 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
772 i, I915_READ(GEN8_GT_IIR(i)));
773 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
774 i, I915_READ(GEN8_GT_IER(i)));
775 }
776
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200777 gen8_display_interrupt_info(m);
David Weinehall36cdd012016-08-22 13:59:31 +0300778 } else if (IS_VALLEYVIEW(dev_priv)) {
Jesse Barnes7e231dbe2012-03-28 13:39:38 -0700779 seq_printf(m, "Display IER:\t%08x\n",
780 I915_READ(VLV_IER));
781 seq_printf(m, "Display IIR:\t%08x\n",
782 I915_READ(VLV_IIR));
783 seq_printf(m, "Display IIR_RW:\t%08x\n",
784 I915_READ(VLV_IIR_RW));
785 seq_printf(m, "Display IMR:\t%08x\n",
786 I915_READ(VLV_IMR));
Chris Wilson4f4631a2017-02-10 13:36:32 +0000787 for_each_pipe(dev_priv, pipe) {
788 enum intel_display_power_domain power_domain;
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000789 intel_wakeref_t pref;
Chris Wilson4f4631a2017-02-10 13:36:32 +0000790
791 power_domain = POWER_DOMAIN_PIPE(pipe);
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000792 pref = intel_display_power_get_if_enabled(dev_priv,
793 power_domain);
794 if (!pref) {
Chris Wilson4f4631a2017-02-10 13:36:32 +0000795 seq_printf(m, "Pipe %c power disabled\n",
796 pipe_name(pipe));
797 continue;
798 }
799
Jesse Barnes7e231dbe2012-03-28 13:39:38 -0700800 seq_printf(m, "Pipe %c stat:\t%08x\n",
801 pipe_name(pipe),
802 I915_READ(PIPESTAT(pipe)));
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000803 intel_display_power_put(dev_priv, power_domain, pref);
Chris Wilson4f4631a2017-02-10 13:36:32 +0000804 }
Jesse Barnes7e231dbe2012-03-28 13:39:38 -0700805
806 seq_printf(m, "Master IER:\t%08x\n",
807 I915_READ(VLV_MASTER_IER));
808
809 seq_printf(m, "Render IER:\t%08x\n",
810 I915_READ(GTIER));
811 seq_printf(m, "Render IIR:\t%08x\n",
812 I915_READ(GTIIR));
813 seq_printf(m, "Render IMR:\t%08x\n",
814 I915_READ(GTIMR));
815
816 seq_printf(m, "PM IER:\t\t%08x\n",
817 I915_READ(GEN6_PMIER));
818 seq_printf(m, "PM IIR:\t\t%08x\n",
819 I915_READ(GEN6_PMIIR));
820 seq_printf(m, "PM IMR:\t\t%08x\n",
821 I915_READ(GEN6_PMIMR));
822
823 seq_printf(m, "Port hotplug:\t%08x\n",
824 I915_READ(PORT_HOTPLUG_EN));
825 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
826 I915_READ(VLV_DPFLIPSTAT));
827 seq_printf(m, "DPINVGTT:\t%08x\n",
828 I915_READ(DPINVGTT));
829
David Weinehall36cdd012016-08-22 13:59:31 +0300830 } else if (!HAS_PCH_SPLIT(dev_priv)) {
Zhenyu Wang5f6a1692009-08-10 21:37:24 +0800831 seq_printf(m, "Interrupt enable: %08x\n",
832 I915_READ(IER));
833 seq_printf(m, "Interrupt identity: %08x\n",
834 I915_READ(IIR));
835 seq_printf(m, "Interrupt mask: %08x\n",
836 I915_READ(IMR));
Damien Lespiau055e3932014-08-18 13:49:10 +0100837 for_each_pipe(dev_priv, pipe)
Jesse Barnes9db4a9c2011-02-07 12:26:52 -0800838 seq_printf(m, "Pipe %c stat: %08x\n",
839 pipe_name(pipe),
840 I915_READ(PIPESTAT(pipe)));
Zhenyu Wang5f6a1692009-08-10 21:37:24 +0800841 } else {
842 seq_printf(m, "North Display Interrupt enable: %08x\n",
843 I915_READ(DEIER));
844 seq_printf(m, "North Display Interrupt identity: %08x\n",
845 I915_READ(DEIIR));
846 seq_printf(m, "North Display Interrupt mask: %08x\n",
847 I915_READ(DEIMR));
848 seq_printf(m, "South Display Interrupt enable: %08x\n",
849 I915_READ(SDEIER));
850 seq_printf(m, "South Display Interrupt identity: %08x\n",
851 I915_READ(SDEIIR));
852 seq_printf(m, "South Display Interrupt mask: %08x\n",
853 I915_READ(SDEIMR));
854 seq_printf(m, "Graphics Interrupt enable: %08x\n",
855 I915_READ(GTIER));
856 seq_printf(m, "Graphics Interrupt identity: %08x\n",
857 I915_READ(GTIIR));
858 seq_printf(m, "Graphics Interrupt mask: %08x\n",
859 I915_READ(GTIMR));
860 }
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200861
862 if (INTEL_GEN(dev_priv) >= 11) {
863 seq_printf(m, "RCS Intr Mask:\t %08x\n",
864 I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
865 seq_printf(m, "BCS Intr Mask:\t %08x\n",
866 I915_READ(GEN11_BCS_RSVD_INTR_MASK));
867 seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
868 I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
869 seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
870 I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
871 seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
872 I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
873 seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
874 I915_READ(GEN11_GUC_SG_INTR_MASK));
875 seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
876 I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
877 seq_printf(m, "Crypto Intr Mask:\t %08x\n",
878 I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
879 seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
880 I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
881
882 } else if (INTEL_GEN(dev_priv) >= 6) {
Chris Wilsond5acadf2017-12-09 10:44:18 +0000883 for_each_engine(engine, dev_priv, id) {
Chris Wilsona2c7f6f2012-09-01 20:51:22 +0100884 seq_printf(m,
885 "Graphics Interrupt mask (%s): %08x\n",
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000886 engine->name, I915_READ_IMR(engine));
Chris Wilson9862e602011-01-04 22:22:17 +0000887 }
Chris Wilson9862e602011-01-04 22:22:17 +0000888 }
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200889
Chris Wilsona0371212019-01-14 14:21:14 +0000890 intel_runtime_pm_put(dev_priv, wakeref);
Chris Wilsonde227ef2010-07-03 07:58:38 +0100891
Ben Gamari20172632009-02-17 20:08:50 -0500892 return 0;
893}
894
Chris Wilsona6172a82009-02-11 14:26:38 +0000895static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
896{
David Weinehall36cdd012016-08-22 13:59:31 +0300897 struct drm_i915_private *dev_priv = node_to_i915(m->private);
898 struct drm_device *dev = &dev_priv->drm;
Chris Wilsonde227ef2010-07-03 07:58:38 +0100899 int i, ret;
900
901 ret = mutex_lock_interruptible(&dev->struct_mutex);
902 if (ret)
903 return ret;
Chris Wilsona6172a82009-02-11 14:26:38 +0000904
Chris Wilsona6172a82009-02-11 14:26:38 +0000905 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
906 for (i = 0; i < dev_priv->num_fence_regs; i++) {
Chris Wilson49ef5292016-08-18 17:17:00 +0100907 struct i915_vma *vma = dev_priv->fence_regs[i].vma;
Chris Wilsona6172a82009-02-11 14:26:38 +0000908
Chris Wilson6c085a72012-08-20 11:40:46 +0200909 seq_printf(m, "Fence %d, pin count = %d, object = ",
910 i, dev_priv->fence_regs[i].pin_count);
Chris Wilson49ef5292016-08-18 17:17:00 +0100911 if (!vma)
Damien Lespiau267f0c92013-06-24 22:59:48 +0100912 seq_puts(m, "unused");
Chris Wilsonc2c347a92010-10-27 15:11:53 +0100913 else
Chris Wilson49ef5292016-08-18 17:17:00 +0100914 describe_obj(m, vma->obj);
Damien Lespiau267f0c92013-06-24 22:59:48 +0100915 seq_putc(m, '\n');
Chris Wilsona6172a82009-02-11 14:26:38 +0000916 }
917
Chris Wilson05394f32010-11-08 19:18:58 +0000918 mutex_unlock(&dev->struct_mutex);
Chris Wilsona6172a82009-02-11 14:26:38 +0000919 return 0;
920}
921
Chris Wilson98a2f412016-10-12 10:05:18 +0100922#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000923static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
924 size_t count, loff_t *pos)
925{
Chris Wilson0e390372018-11-23 13:23:25 +0000926 struct i915_gpu_state *error;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000927 ssize_t ret;
Chris Wilson0e390372018-11-23 13:23:25 +0000928 void *buf;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000929
Chris Wilson0e390372018-11-23 13:23:25 +0000930 error = file->private_data;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000931 if (!error)
932 return 0;
933
Chris Wilson0e390372018-11-23 13:23:25 +0000934 /* Bounce buffer required because of kernfs __user API convenience. */
935 buf = kmalloc(count, GFP_KERNEL);
936 if (!buf)
937 return -ENOMEM;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000938
Chris Wilson0e390372018-11-23 13:23:25 +0000939 ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
940 if (ret <= 0)
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000941 goto out;
942
Chris Wilson0e390372018-11-23 13:23:25 +0000943 if (!copy_to_user(ubuf, buf, ret))
944 *pos += ret;
945 else
946 ret = -EFAULT;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000947
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000948out:
Chris Wilson0e390372018-11-23 13:23:25 +0000949 kfree(buf);
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000950 return ret;
951}
952
953static int gpu_state_release(struct inode *inode, struct file *file)
954{
955 i915_gpu_state_put(file->private_data);
956 return 0;
957}
958
959static int i915_gpu_info_open(struct inode *inode, struct file *file)
960{
Chris Wilson090e5fe2017-03-28 14:14:07 +0100961 struct drm_i915_private *i915 = inode->i_private;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000962 struct i915_gpu_state *gpu;
Chris Wilsona0371212019-01-14 14:21:14 +0000963 intel_wakeref_t wakeref;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000964
Chris Wilsond4225a52019-01-14 14:21:23 +0000965 gpu = NULL;
966 with_intel_runtime_pm(i915, wakeref)
967 gpu = i915_capture_gpu_state(i915);
Chris Wilsone6154e42018-12-07 11:05:54 +0000968 if (IS_ERR(gpu))
969 return PTR_ERR(gpu);
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000970
971 file->private_data = gpu;
972 return 0;
973}
974
975static const struct file_operations i915_gpu_info_fops = {
976 .owner = THIS_MODULE,
977 .open = i915_gpu_info_open,
978 .read = gpu_state_read,
979 .llseek = default_llseek,
980 .release = gpu_state_release,
981};
Chris Wilson98a2f412016-10-12 10:05:18 +0100982
Daniel Vetterd5442302012-04-27 15:17:40 +0200983static ssize_t
984i915_error_state_write(struct file *filp,
985 const char __user *ubuf,
986 size_t cnt,
987 loff_t *ppos)
988{
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000989 struct i915_gpu_state *error = filp->private_data;
990
991 if (!error)
992 return 0;
Daniel Vetterd5442302012-04-27 15:17:40 +0200993
994 DRM_DEBUG_DRIVER("Resetting error state\n");
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000995 i915_reset_error_state(error->i915);
Daniel Vetterd5442302012-04-27 15:17:40 +0200996
997 return cnt;
998}
999
1000static int i915_error_state_open(struct inode *inode, struct file *file)
1001{
Chris Wilsone6154e42018-12-07 11:05:54 +00001002 struct i915_gpu_state *error;
1003
1004 error = i915_first_error_state(inode->i_private);
1005 if (IS_ERR(error))
1006 return PTR_ERR(error);
1007
1008 file->private_data = error;
Mika Kuoppalaedc3d882013-05-23 13:55:35 +03001009 return 0;
Daniel Vetterd5442302012-04-27 15:17:40 +02001010}
1011
Daniel Vetterd5442302012-04-27 15:17:40 +02001012static const struct file_operations i915_error_state_fops = {
1013 .owner = THIS_MODULE,
1014 .open = i915_error_state_open,
Chris Wilson5a4c6f12017-02-14 16:46:11 +00001015 .read = gpu_state_read,
Daniel Vetterd5442302012-04-27 15:17:40 +02001016 .write = i915_error_state_write,
1017 .llseek = default_llseek,
Chris Wilson5a4c6f12017-02-14 16:46:11 +00001018 .release = gpu_state_release,
Daniel Vetterd5442302012-04-27 15:17:40 +02001019};
Chris Wilson98a2f412016-10-12 10:05:18 +01001020#endif
1021
Deepak Sadb4bd12014-03-31 11:30:02 +05301022static int i915_frequency_info(struct seq_file *m, void *unused)
Jesse Barnesf97108d2010-01-29 11:27:07 -08001023{
David Weinehall36cdd012016-08-22 13:59:31 +03001024 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001025 struct intel_rps *rps = &dev_priv->gt_pm.rps;
Chris Wilsona0371212019-01-14 14:21:14 +00001026 intel_wakeref_t wakeref;
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02001027 int ret = 0;
1028
Chris Wilsona0371212019-01-14 14:21:14 +00001029 wakeref = intel_runtime_pm_get(dev_priv);
Jesse Barnesf97108d2010-01-29 11:27:07 -08001030
Lucas De Marchicf819ef2018-12-12 10:10:43 -08001031 if (IS_GEN(dev_priv, 5)) {
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001032 u16 rgvswctl = I915_READ16(MEMSWCTL);
1033 u16 rgvstat = I915_READ16(MEMSTAT_ILK);
1034
1035 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
1036 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
1037 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
1038 MEMSTAT_VID_SHIFT);
1039 seq_printf(m, "Current P-state: %d\n",
1040 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
David Weinehall36cdd012016-08-22 13:59:31 +03001041 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
Sagar Arun Kamble0d6fc922017-10-10 22:30:02 +01001042 u32 rpmodectl, freq_sts;
Wayne Boyer666a4532015-12-09 12:29:35 -08001043
Sagar Arun Kamble9f817502017-10-10 22:30:05 +01001044 mutex_lock(&dev_priv->pcu_lock);
Sagar Arun Kamble0d6fc922017-10-10 22:30:02 +01001045
1046 rpmodectl = I915_READ(GEN6_RP_CONTROL);
1047 seq_printf(m, "Video Turbo Mode: %s\n",
1048 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1049 seq_printf(m, "HW control enabled: %s\n",
1050 yesno(rpmodectl & GEN6_RP_ENABLE));
1051 seq_printf(m, "SW control enabled: %s\n",
1052 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1053 GEN6_RP_MEDIA_SW_MODE));
1054
Wayne Boyer666a4532015-12-09 12:29:35 -08001055 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
1056 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
1057 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
1058
1059 seq_printf(m, "actual GPU freq: %d MHz\n",
1060 intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
1061
1062 seq_printf(m, "current GPU freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001063 intel_gpu_freq(dev_priv, rps->cur_freq));
Wayne Boyer666a4532015-12-09 12:29:35 -08001064
1065 seq_printf(m, "max GPU freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001066 intel_gpu_freq(dev_priv, rps->max_freq));
Wayne Boyer666a4532015-12-09 12:29:35 -08001067
1068 seq_printf(m, "min GPU freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001069 intel_gpu_freq(dev_priv, rps->min_freq));
Wayne Boyer666a4532015-12-09 12:29:35 -08001070
1071 seq_printf(m, "idle GPU freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001072 intel_gpu_freq(dev_priv, rps->idle_freq));
Wayne Boyer666a4532015-12-09 12:29:35 -08001073
1074 seq_printf(m,
1075 "efficient (RPe) frequency: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001076 intel_gpu_freq(dev_priv, rps->efficient_freq));
Sagar Arun Kamble9f817502017-10-10 22:30:05 +01001077 mutex_unlock(&dev_priv->pcu_lock);
David Weinehall36cdd012016-08-22 13:59:31 +03001078 } else if (INTEL_GEN(dev_priv) >= 6) {
Bob Paauwe35040562015-06-25 14:54:07 -07001079 u32 rp_state_limits;
1080 u32 gt_perf_status;
1081 u32 rp_state_cap;
Chris Wilson0d8f9492014-03-27 09:06:14 +00001082 u32 rpmodectl, rpinclimit, rpdeclimit;
Chris Wilson8e8c06c2013-08-26 19:51:01 -03001083 u32 rpstat, cagf, reqf;
Jesse Barnesccab5c82011-01-18 15:49:25 -08001084 u32 rpupei, rpcurup, rpprevup;
1085 u32 rpdownei, rpcurdown, rpprevdown;
Paulo Zanoni9dd3c602014-08-01 18:14:48 -03001086 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001087 int max_freq;
1088
Bob Paauwe35040562015-06-25 14:54:07 -07001089 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02001090 if (IS_GEN9_LP(dev_priv)) {
Bob Paauwe35040562015-06-25 14:54:07 -07001091 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
1092 gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
1093 } else {
1094 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
1095 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
1096 }
1097
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001098 /* RPSTAT1 is in the GT power well */
Mika Kuoppala59bad942015-01-16 11:34:40 +02001099 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001100
Chris Wilson8e8c06c2013-08-26 19:51:01 -03001101 reqf = I915_READ(GEN6_RPNSWREQ);
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001102 if (INTEL_GEN(dev_priv) >= 9)
Akash Goel60260a52015-03-06 11:07:21 +05301103 reqf >>= 23;
1104 else {
1105 reqf &= ~GEN6_TURBO_DISABLE;
David Weinehall36cdd012016-08-22 13:59:31 +03001106 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
Akash Goel60260a52015-03-06 11:07:21 +05301107 reqf >>= 24;
1108 else
1109 reqf >>= 25;
1110 }
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001111 reqf = intel_gpu_freq(dev_priv, reqf);
Chris Wilson8e8c06c2013-08-26 19:51:01 -03001112
Chris Wilson0d8f9492014-03-27 09:06:14 +00001113 rpmodectl = I915_READ(GEN6_RP_CONTROL);
1114 rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
1115 rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
1116
Jesse Barnesccab5c82011-01-18 15:49:25 -08001117 rpstat = I915_READ(GEN6_RPSTAT1);
Akash Goeld6cda9c2016-04-23 00:05:46 +05301118 rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
1119 rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
1120 rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
1121 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
1122 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
1123 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
Tvrtko Ursulinc84b2702017-11-21 18:18:44 +00001124 cagf = intel_gpu_freq(dev_priv,
1125 intel_get_cagf(dev_priv, rpstat));
Jesse Barnesccab5c82011-01-18 15:49:25 -08001126
Mika Kuoppala59bad942015-01-16 11:34:40 +02001127 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
Ben Widawskyd1ebd8162011-04-25 20:11:50 +01001128
Oscar Mateo6b7a6a72018-05-10 14:59:55 -07001129 if (INTEL_GEN(dev_priv) >= 11) {
1130 pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
1131 pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
1132 /*
1133 * The equivalent to the PM ISR & IIR cannot be read
1134 * without affecting the current state of the system
1135 */
1136 pm_isr = 0;
1137 pm_iir = 0;
1138 } else if (INTEL_GEN(dev_priv) >= 8) {
Paulo Zanoni9dd3c602014-08-01 18:14:48 -03001139 pm_ier = I915_READ(GEN8_GT_IER(2));
1140 pm_imr = I915_READ(GEN8_GT_IMR(2));
1141 pm_isr = I915_READ(GEN8_GT_ISR(2));
1142 pm_iir = I915_READ(GEN8_GT_IIR(2));
Oscar Mateo6b7a6a72018-05-10 14:59:55 -07001143 } else {
1144 pm_ier = I915_READ(GEN6_PMIER);
1145 pm_imr = I915_READ(GEN6_PMIMR);
1146 pm_isr = I915_READ(GEN6_PMISR);
1147 pm_iir = I915_READ(GEN6_PMIIR);
Paulo Zanoni9dd3c602014-08-01 18:14:48 -03001148 }
Oscar Mateo6b7a6a72018-05-10 14:59:55 -07001149 pm_mask = I915_READ(GEN6_PMINTRMSK);
1150
Sagar Arun Kamble960e5462017-10-10 22:29:59 +01001151 seq_printf(m, "Video Turbo Mode: %s\n",
1152 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1153 seq_printf(m, "HW control enabled: %s\n",
1154 yesno(rpmodectl & GEN6_RP_ENABLE));
1155 seq_printf(m, "SW control enabled: %s\n",
1156 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1157 GEN6_RP_MEDIA_SW_MODE));
Oscar Mateo6b7a6a72018-05-10 14:59:55 -07001158
1159 seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
1160 pm_ier, pm_imr, pm_mask);
1161 if (INTEL_GEN(dev_priv) <= 10)
1162 seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
1163 pm_isr, pm_iir);
Sagar Arun Kamble5dd04552017-03-11 08:07:00 +05301164 seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001165 rps->pm_intrmsk_mbz);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001166 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001167 seq_printf(m, "Render p-state ratio: %d\n",
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001168 (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001169 seq_printf(m, "Render p-state VID: %d\n",
1170 gt_perf_status & 0xff);
1171 seq_printf(m, "Render p-state limit: %d\n",
1172 rp_state_limits & 0xff);
Chris Wilson0d8f9492014-03-27 09:06:14 +00001173 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
1174 seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
1175 seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
1176 seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
Chris Wilson8e8c06c2013-08-26 19:51:01 -03001177 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
Ben Widawskyf82855d2013-01-29 12:00:15 -08001178 seq_printf(m, "CAGF: %dMHz\n", cagf);
Akash Goeld6cda9c2016-04-23 00:05:46 +05301179 seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
1180 rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
1181 seq_printf(m, "RP CUR UP: %d (%dus)\n",
1182 rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
1183 seq_printf(m, "RP PREV UP: %d (%dus)\n",
1184 rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
Chris Wilson60548c52018-07-31 14:26:29 +01001185 seq_printf(m, "Up threshold: %d%%\n",
1186 rps->power.up_threshold);
Chris Wilsond86ed342015-04-27 13:41:19 +01001187
Akash Goeld6cda9c2016-04-23 00:05:46 +05301188 seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
1189 rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
1190 seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
1191 rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
1192 seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
1193 rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
Chris Wilson60548c52018-07-31 14:26:29 +01001194 seq_printf(m, "Down threshold: %d%%\n",
1195 rps->power.down_threshold);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001196
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02001197 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
Bob Paauwe35040562015-06-25 14:54:07 -07001198 rp_state_cap >> 16) & 0xff;
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001199 max_freq *= (IS_GEN9_BC(dev_priv) ||
Oscar Mateo2b2874e2018-04-05 17:00:52 +03001200 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001201 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001202 intel_gpu_freq(dev_priv, max_freq));
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001203
1204 max_freq = (rp_state_cap & 0xff00) >> 8;
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001205 max_freq *= (IS_GEN9_BC(dev_priv) ||
Oscar Mateo2b2874e2018-04-05 17:00:52 +03001206 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001207 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001208 intel_gpu_freq(dev_priv, max_freq));
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001209
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02001210 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
Bob Paauwe35040562015-06-25 14:54:07 -07001211 rp_state_cap >> 0) & 0xff;
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001212 max_freq *= (IS_GEN9_BC(dev_priv) ||
Oscar Mateo2b2874e2018-04-05 17:00:52 +03001213 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001214 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001215 intel_gpu_freq(dev_priv, max_freq));
Ben Widawsky31c77382013-04-05 14:29:22 -07001216 seq_printf(m, "Max overclocked frequency: %dMHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001217 intel_gpu_freq(dev_priv, rps->max_freq));
Chris Wilsonaed242f2015-03-18 09:48:21 +00001218
Chris Wilsond86ed342015-04-27 13:41:19 +01001219 seq_printf(m, "Current freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001220 intel_gpu_freq(dev_priv, rps->cur_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001221 seq_printf(m, "Actual freq: %d MHz\n", cagf);
Chris Wilsonaed242f2015-03-18 09:48:21 +00001222 seq_printf(m, "Idle freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001223 intel_gpu_freq(dev_priv, rps->idle_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001224 seq_printf(m, "Min freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001225 intel_gpu_freq(dev_priv, rps->min_freq));
Chris Wilson29ecd78d2016-07-13 09:10:35 +01001226 seq_printf(m, "Boost freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001227 intel_gpu_freq(dev_priv, rps->boost_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001228 seq_printf(m, "Max freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001229 intel_gpu_freq(dev_priv, rps->max_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001230 seq_printf(m,
1231 "efficient (RPe) frequency: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001232 intel_gpu_freq(dev_priv, rps->efficient_freq));
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001233 } else {
Damien Lespiau267f0c92013-06-24 22:59:48 +01001234 seq_puts(m, "no P-state info available\n");
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001235 }
Jesse Barnesf97108d2010-01-29 11:27:07 -08001236
Ville Syrjälä49cd97a2017-02-07 20:33:45 +02001237 seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
Mika Kahola1170f282015-09-25 14:00:32 +03001238 seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
1239 seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
1240
Chris Wilsona0371212019-01-14 14:21:14 +00001241 intel_runtime_pm_put(dev_priv, wakeref);
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02001242 return ret;
Jesse Barnesf97108d2010-01-29 11:27:07 -08001243}
1244
Ben Widawskyd6369512016-09-20 16:54:32 +03001245static void i915_instdone_info(struct drm_i915_private *dev_priv,
1246 struct seq_file *m,
1247 struct intel_instdone *instdone)
1248{
Ben Widawskyf9e61372016-09-20 16:54:33 +03001249 int slice;
1250 int subslice;
1251
Ben Widawskyd6369512016-09-20 16:54:32 +03001252 seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1253 instdone->instdone);
1254
1255 if (INTEL_GEN(dev_priv) <= 3)
1256 return;
1257
1258 seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1259 instdone->slice_common);
1260
1261 if (INTEL_GEN(dev_priv) <= 6)
1262 return;
1263
Ben Widawskyf9e61372016-09-20 16:54:33 +03001264 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1265 seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1266 slice, subslice, instdone->sampler[slice][subslice]);
1267
1268 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1269 seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1270 slice, subslice, instdone->row[slice][subslice]);
Ben Widawskyd6369512016-09-20 16:54:32 +03001271}
1272
Chris Wilsonf6544492015-01-26 18:03:04 +02001273static int i915_hangcheck_info(struct seq_file *m, void *unused)
1274{
David Weinehall36cdd012016-08-22 13:59:31 +03001275 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001276 struct intel_engine_cs *engine;
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00001277 u64 acthd[I915_NUM_ENGINES];
1278 u32 seqno[I915_NUM_ENGINES];
Ben Widawskyd6369512016-09-20 16:54:32 +03001279 struct intel_instdone instdone;
Chris Wilsona0371212019-01-14 14:21:14 +00001280 intel_wakeref_t wakeref;
Dave Gordonc3232b12016-03-23 18:19:53 +00001281 enum intel_engine_id id;
Chris Wilsonf6544492015-01-26 18:03:04 +02001282
Chris Wilson8af29b02016-09-09 14:11:47 +01001283 if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
Chris Wilson8c185ec2017-03-16 17:13:02 +00001284 seq_puts(m, "Wedged\n");
1285 if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
1286 seq_puts(m, "Reset in progress: struct_mutex backoff\n");
Chris Wilson8af29b02016-09-09 14:11:47 +01001287 if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
Chris Wilson8c185ec2017-03-16 17:13:02 +00001288 seq_puts(m, "Waiter holding struct mutex\n");
Chris Wilson8af29b02016-09-09 14:11:47 +01001289 if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
Chris Wilson8c185ec2017-03-16 17:13:02 +00001290 seq_puts(m, "struct_mutex blocked for reset\n");
Chris Wilson8af29b02016-09-09 14:11:47 +01001291
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00001292 if (!i915_modparams.enable_hangcheck) {
Chris Wilson8c185ec2017-03-16 17:13:02 +00001293 seq_puts(m, "Hangcheck disabled\n");
Chris Wilsonf6544492015-01-26 18:03:04 +02001294 return 0;
1295 }
1296
Chris Wilsond4225a52019-01-14 14:21:23 +00001297 with_intel_runtime_pm(dev_priv, wakeref) {
1298 for_each_engine(engine, dev_priv, id) {
1299 acthd[id] = intel_engine_get_active_head(engine);
1300 seqno[id] = intel_engine_get_seqno(engine);
1301 }
Mika Kuoppalaebbc7542015-02-05 18:41:48 +02001302
Chris Wilsond4225a52019-01-14 14:21:23 +00001303 intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
Mika Kuoppalaebbc7542015-02-05 18:41:48 +02001304 }
1305
Chris Wilson8352aea2017-03-03 09:00:56 +00001306 if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
1307 seq_printf(m, "Hangcheck active, timer fires in %dms\n",
Chris Wilsonf6544492015-01-26 18:03:04 +02001308 jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
1309 jiffies));
Chris Wilson8352aea2017-03-03 09:00:56 +00001310 else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
1311 seq_puts(m, "Hangcheck active, work pending\n");
1312 else
1313 seq_puts(m, "Hangcheck inactive\n");
Chris Wilsonf6544492015-01-26 18:03:04 +02001314
Chris Wilsonf73b5672017-03-02 15:03:56 +00001315 seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));
1316
Akash Goel3b3f1652016-10-13 22:44:48 +05301317 for_each_engine(engine, dev_priv, id) {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001318 seq_printf(m, "%s:\n", engine->name);
Chris Wilsoneb8d0f52019-01-25 13:22:28 +00001319 seq_printf(m, "\tseqno = %x [current %x, last %x], %dms ago\n",
Chris Wilsoncb399ea2016-11-01 10:03:16 +00001320 engine->hangcheck.seqno, seqno[id],
Chris Wilsoneb8d0f52019-01-25 13:22:28 +00001321 intel_engine_last_submit(engine),
1322 jiffies_to_msecs(jiffies -
1323 engine->hangcheck.action_timestamp));
Mika Kuoppala3fe3b032016-11-18 15:09:04 +02001324
Chris Wilsonf6544492015-01-26 18:03:04 +02001325 seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001326 (long long)engine->hangcheck.acthd,
Dave Gordonc3232b12016-03-23 18:19:53 +00001327 (long long)acthd[id]);
Mika Kuoppala61642ff2015-12-01 17:56:12 +02001328
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001329 if (engine->id == RCS) {
Ben Widawskyd6369512016-09-20 16:54:32 +03001330 seq_puts(m, "\tinstdone read =\n");
Mika Kuoppala61642ff2015-12-01 17:56:12 +02001331
Ben Widawskyd6369512016-09-20 16:54:32 +03001332 i915_instdone_info(dev_priv, m, &instdone);
Mika Kuoppala61642ff2015-12-01 17:56:12 +02001333
Ben Widawskyd6369512016-09-20 16:54:32 +03001334 seq_puts(m, "\tinstdone accu =\n");
Mika Kuoppala61642ff2015-12-01 17:56:12 +02001335
Ben Widawskyd6369512016-09-20 16:54:32 +03001336 i915_instdone_info(dev_priv, m,
1337 &engine->hangcheck.instdone);
Mika Kuoppala61642ff2015-12-01 17:56:12 +02001338 }
Chris Wilsonf6544492015-01-26 18:03:04 +02001339 }
1340
1341 return 0;
1342}
1343
Michel Thierry061d06a2017-06-20 10:57:49 +01001344static int i915_reset_info(struct seq_file *m, void *unused)
1345{
1346 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1347 struct i915_gpu_error *error = &dev_priv->gpu_error;
1348 struct intel_engine_cs *engine;
1349 enum intel_engine_id id;
1350
1351 seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1352
1353 for_each_engine(engine, dev_priv, id) {
1354 seq_printf(m, "%s = %u\n", engine->name,
1355 i915_reset_engine_count(error, engine));
1356 }
1357
1358 return 0;
1359}
1360
Ben Widawsky4d855292011-12-12 19:34:16 -08001361static int ironlake_drpc_info(struct seq_file *m)
Jesse Barnesf97108d2010-01-29 11:27:07 -08001362{
David Weinehall36cdd012016-08-22 13:59:31 +03001363 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Ben Widawsky616fdb52011-10-05 11:44:54 -07001364 u32 rgvmodectl, rstdbyctl;
1365 u16 crstandvid;
Ben Widawsky616fdb52011-10-05 11:44:54 -07001366
Ben Widawsky616fdb52011-10-05 11:44:54 -07001367 rgvmodectl = I915_READ(MEMMODECTL);
1368 rstdbyctl = I915_READ(RSTDBYCTL);
1369 crstandvid = I915_READ16(CRSTANDVID);
1370
Jani Nikula742f4912015-09-03 11:16:09 +03001371 seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001372 seq_printf(m, "Boost freq: %d\n",
1373 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1374 MEMMODE_BOOST_FREQ_SHIFT);
1375 seq_printf(m, "HW control enabled: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001376 yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001377 seq_printf(m, "SW control enabled: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001378 yesno(rgvmodectl & MEMMODE_SWMODE_EN));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001379 seq_printf(m, "Gated voltage change: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001380 yesno(rgvmodectl & MEMMODE_RCLK_GATE));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001381 seq_printf(m, "Starting frequency: P%d\n",
1382 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
Jesse Barnes7648fa92010-05-20 14:28:11 -07001383 seq_printf(m, "Max P-state: P%d\n",
Jesse Barnesf97108d2010-01-29 11:27:07 -08001384 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
Jesse Barnes7648fa92010-05-20 14:28:11 -07001385 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1386 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1387 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1388 seq_printf(m, "Render standby enabled: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001389 yesno(!(rstdbyctl & RCX_SW_EXIT)));
Damien Lespiau267f0c92013-06-24 22:59:48 +01001390 seq_puts(m, "Current RS state: ");
Jesse Barnes88271da2011-01-05 12:01:24 -08001391 switch (rstdbyctl & RSX_STATUS_MASK) {
1392 case RSX_STATUS_ON:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001393 seq_puts(m, "on\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001394 break;
1395 case RSX_STATUS_RC1:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001396 seq_puts(m, "RC1\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001397 break;
1398 case RSX_STATUS_RC1E:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001399 seq_puts(m, "RC1E\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001400 break;
1401 case RSX_STATUS_RS1:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001402 seq_puts(m, "RS1\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001403 break;
1404 case RSX_STATUS_RS2:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001405 seq_puts(m, "RS2 (RC6)\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001406 break;
1407 case RSX_STATUS_RS3:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001408 seq_puts(m, "RC3 (RC6+)\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001409 break;
1410 default:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001411 seq_puts(m, "unknown\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001412 break;
1413 }
Jesse Barnesf97108d2010-01-29 11:27:07 -08001414
1415 return 0;
1416}
1417
Mika Kuoppalaf65367b2015-01-16 11:34:42 +02001418static int i915_forcewake_domains(struct seq_file *m, void *data)
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001419{
Chris Wilson233ebf52017-03-23 10:19:44 +00001420 struct drm_i915_private *i915 = node_to_i915(m->private);
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001421 struct intel_uncore_forcewake_domain *fw_domain;
Chris Wilsond2dc94b2017-03-23 10:19:41 +00001422 unsigned int tmp;
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001423
Chris Wilsond7a133d2017-09-07 14:44:41 +01001424 seq_printf(m, "user.bypass_count = %u\n",
1425 i915->uncore.user_forcewake.count);
1426
Chris Wilson233ebf52017-03-23 10:19:44 +00001427 for_each_fw_domain(fw_domain, i915, tmp)
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001428 seq_printf(m, "%s.wake_count = %u\n",
Tvrtko Ursulin33c582c2016-04-07 17:04:33 +01001429 intel_uncore_forcewake_domain_to_str(fw_domain->id),
Chris Wilson233ebf52017-03-23 10:19:44 +00001430 READ_ONCE(fw_domain->wake_count));
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001431
1432 return 0;
1433}
1434
Mika Kuoppala13628772017-03-15 17:43:02 +02001435static void print_rc6_res(struct seq_file *m,
1436 const char *title,
1437 const i915_reg_t reg)
1438{
1439 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1440
1441 seq_printf(m, "%s %u (%llu us)\n",
1442 title, I915_READ(reg),
1443 intel_rc6_residency_us(dev_priv, reg));
1444}
1445
Deepak S669ab5a2014-01-10 15:18:26 +05301446static int vlv_drpc_info(struct seq_file *m)
1447{
David Weinehall36cdd012016-08-22 13:59:31 +03001448 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Sagar Arun Kamble0d6fc922017-10-10 22:30:02 +01001449 u32 rcctl1, pw_status;
Deepak S669ab5a2014-01-10 15:18:26 +05301450
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001451 pw_status = I915_READ(VLV_GTLC_PW_STATUS);
Deepak S669ab5a2014-01-10 15:18:26 +05301452 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1453
Deepak S669ab5a2014-01-10 15:18:26 +05301454 seq_printf(m, "RC6 Enabled: %s\n",
1455 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1456 GEN6_RC_CTL_EI_MODE(1))));
1457 seq_printf(m, "Render Power Well: %s\n",
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001458 (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
Deepak S669ab5a2014-01-10 15:18:26 +05301459 seq_printf(m, "Media Power Well: %s\n",
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001460 (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
Deepak S669ab5a2014-01-10 15:18:26 +05301461
Mika Kuoppala13628772017-03-15 17:43:02 +02001462 print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1463 print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
Imre Deak9cc19be2014-04-14 20:24:24 +03001464
Mika Kuoppalaf65367b2015-01-16 11:34:42 +02001465 return i915_forcewake_domains(m, NULL);
Deepak S669ab5a2014-01-10 15:18:26 +05301466}
1467
Ben Widawsky4d855292011-12-12 19:34:16 -08001468static int gen6_drpc_info(struct seq_file *m)
1469{
David Weinehall36cdd012016-08-22 13:59:31 +03001470 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Sagar Arun Kamble960e5462017-10-10 22:29:59 +01001471 u32 gt_core_status, rcctl1, rc6vids = 0;
Akash Goelf2dd7572016-06-27 20:10:01 +05301472 u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
Ben Widawsky4d855292011-12-12 19:34:16 -08001473
Ville Syrjälä75aa3f62015-10-22 15:34:56 +03001474 gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
Chris Wilsoned71f1b2013-07-19 20:36:56 +01001475 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
Ben Widawsky4d855292011-12-12 19:34:16 -08001476
Ben Widawsky4d855292011-12-12 19:34:16 -08001477 rcctl1 = I915_READ(GEN6_RC_CONTROL);
David Weinehall36cdd012016-08-22 13:59:31 +03001478 if (INTEL_GEN(dev_priv) >= 9) {
Akash Goelf2dd7572016-06-27 20:10:01 +05301479 gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
1480 gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
1481 }
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001482
Imre Deak51cc9ad2018-02-08 19:41:02 +02001483 if (INTEL_GEN(dev_priv) <= 7) {
1484 mutex_lock(&dev_priv->pcu_lock);
1485 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
1486 &rc6vids);
1487 mutex_unlock(&dev_priv->pcu_lock);
1488 }
Ben Widawsky4d855292011-12-12 19:34:16 -08001489
Eric Anholtfff24e22012-01-23 16:14:05 -08001490 seq_printf(m, "RC1e Enabled: %s\n",
Ben Widawsky4d855292011-12-12 19:34:16 -08001491 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1492 seq_printf(m, "RC6 Enabled: %s\n",
1493 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
David Weinehall36cdd012016-08-22 13:59:31 +03001494 if (INTEL_GEN(dev_priv) >= 9) {
Akash Goelf2dd7572016-06-27 20:10:01 +05301495 seq_printf(m, "Render Well Gating Enabled: %s\n",
1496 yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
1497 seq_printf(m, "Media Well Gating Enabled: %s\n",
1498 yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
1499 }
Ben Widawsky4d855292011-12-12 19:34:16 -08001500 seq_printf(m, "Deep RC6 Enabled: %s\n",
1501 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1502 seq_printf(m, "Deepest RC6 Enabled: %s\n",
1503 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
Damien Lespiau267f0c92013-06-24 22:59:48 +01001504 seq_puts(m, "Current RC state: ");
Ben Widawsky4d855292011-12-12 19:34:16 -08001505 switch (gt_core_status & GEN6_RCn_MASK) {
1506 case GEN6_RC0:
1507 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
Damien Lespiau267f0c92013-06-24 22:59:48 +01001508 seq_puts(m, "Core Power Down\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001509 else
Damien Lespiau267f0c92013-06-24 22:59:48 +01001510 seq_puts(m, "on\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001511 break;
1512 case GEN6_RC3:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001513 seq_puts(m, "RC3\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001514 break;
1515 case GEN6_RC6:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001516 seq_puts(m, "RC6\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001517 break;
1518 case GEN6_RC7:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001519 seq_puts(m, "RC7\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001520 break;
1521 default:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001522 seq_puts(m, "Unknown\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001523 break;
1524 }
1525
1526 seq_printf(m, "Core Power Down: %s\n",
1527 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
David Weinehall36cdd012016-08-22 13:59:31 +03001528 if (INTEL_GEN(dev_priv) >= 9) {
Akash Goelf2dd7572016-06-27 20:10:01 +05301529 seq_printf(m, "Render Power Well: %s\n",
1530 (gen9_powergate_status &
1531 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
1532 seq_printf(m, "Media Power Well: %s\n",
1533 (gen9_powergate_status &
1534 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
1535 }
Ben Widawskycce66a22012-03-27 18:59:38 -07001536
1537 /* Not exactly sure what this is */
Mika Kuoppala13628772017-03-15 17:43:02 +02001538 print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
1539 GEN6_GT_GFX_RC6_LOCKED);
1540 print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
1541 print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
1542 print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
Ben Widawskycce66a22012-03-27 18:59:38 -07001543
Imre Deak51cc9ad2018-02-08 19:41:02 +02001544 if (INTEL_GEN(dev_priv) <= 7) {
1545 seq_printf(m, "RC6 voltage: %dmV\n",
1546 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1547 seq_printf(m, "RC6+ voltage: %dmV\n",
1548 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1549 seq_printf(m, "RC6++ voltage: %dmV\n",
1550 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1551 }
1552
Akash Goelf2dd7572016-06-27 20:10:01 +05301553 return i915_forcewake_domains(m, NULL);
Ben Widawsky4d855292011-12-12 19:34:16 -08001554}
1555
1556static int i915_drpc_info(struct seq_file *m, void *unused)
1557{
David Weinehall36cdd012016-08-22 13:59:31 +03001558 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00001559 intel_wakeref_t wakeref;
Chris Wilsond4225a52019-01-14 14:21:23 +00001560 int err = -ENODEV;
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001561
Chris Wilsond4225a52019-01-14 14:21:23 +00001562 with_intel_runtime_pm(dev_priv, wakeref) {
1563 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1564 err = vlv_drpc_info(m);
1565 else if (INTEL_GEN(dev_priv) >= 6)
1566 err = gen6_drpc_info(m);
1567 else
1568 err = ironlake_drpc_info(m);
1569 }
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001570
1571 return err;
Ben Widawsky4d855292011-12-12 19:34:16 -08001572}
1573
Daniel Vetter9a851782015-06-18 10:30:22 +02001574static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1575{
David Weinehall36cdd012016-08-22 13:59:31 +03001576 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Daniel Vetter9a851782015-06-18 10:30:22 +02001577
1578 seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1579 dev_priv->fb_tracking.busy_bits);
1580
1581 seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1582 dev_priv->fb_tracking.flip_bits);
1583
1584 return 0;
1585}
1586
Jesse Barnesb5e50c32010-02-05 12:42:41 -08001587static int i915_fbc_status(struct seq_file *m, void *unused)
1588{
David Weinehall36cdd012016-08-22 13:59:31 +03001589 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilson31388722017-12-20 20:58:48 +00001590 struct intel_fbc *fbc = &dev_priv->fbc;
Chris Wilsona0371212019-01-14 14:21:14 +00001591 intel_wakeref_t wakeref;
Jesse Barnesb5e50c32010-02-05 12:42:41 -08001592
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00001593 if (!HAS_FBC(dev_priv))
1594 return -ENODEV;
Jesse Barnesb5e50c32010-02-05 12:42:41 -08001595
Chris Wilsona0371212019-01-14 14:21:14 +00001596 wakeref = intel_runtime_pm_get(dev_priv);
Chris Wilson31388722017-12-20 20:58:48 +00001597 mutex_lock(&fbc->lock);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001598
Paulo Zanoni0e631ad2015-10-14 17:45:36 -03001599 if (intel_fbc_is_active(dev_priv))
Damien Lespiau267f0c92013-06-24 22:59:48 +01001600 seq_puts(m, "FBC enabled\n");
Paulo Zanoni2e8144a2015-06-12 14:36:20 -03001601 else
Chris Wilson31388722017-12-20 20:58:48 +00001602 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1603
Ville Syrjälä3fd5d1e2017-06-06 15:43:18 +03001604 if (intel_fbc_is_active(dev_priv)) {
1605 u32 mask;
1606
1607 if (INTEL_GEN(dev_priv) >= 8)
1608 mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1609 else if (INTEL_GEN(dev_priv) >= 7)
1610 mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1611 else if (INTEL_GEN(dev_priv) >= 5)
1612 mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1613 else if (IS_G4X(dev_priv))
1614 mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1615 else
1616 mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1617 FBC_STAT_COMPRESSED);
1618
1619 seq_printf(m, "Compressing: %s\n", yesno(mask));
Paulo Zanoni0fc6a9d2016-10-21 13:55:46 -02001620 }
Paulo Zanoni31b9df12015-06-12 14:36:18 -03001621
Chris Wilson31388722017-12-20 20:58:48 +00001622 mutex_unlock(&fbc->lock);
Chris Wilsona0371212019-01-14 14:21:14 +00001623 intel_runtime_pm_put(dev_priv, wakeref);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001624
Jesse Barnesb5e50c32010-02-05 12:42:41 -08001625 return 0;
1626}
1627
Ville Syrjälä4127dc42017-06-06 15:44:12 +03001628static int i915_fbc_false_color_get(void *data, u64 *val)
Rodrigo Vivida46f932014-08-01 02:04:45 -07001629{
David Weinehall36cdd012016-08-22 13:59:31 +03001630 struct drm_i915_private *dev_priv = data;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001631
David Weinehall36cdd012016-08-22 13:59:31 +03001632 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
Rodrigo Vivida46f932014-08-01 02:04:45 -07001633 return -ENODEV;
1634
Rodrigo Vivida46f932014-08-01 02:04:45 -07001635 *val = dev_priv->fbc.false_color;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001636
1637 return 0;
1638}
1639
Ville Syrjälä4127dc42017-06-06 15:44:12 +03001640static int i915_fbc_false_color_set(void *data, u64 val)
Rodrigo Vivida46f932014-08-01 02:04:45 -07001641{
David Weinehall36cdd012016-08-22 13:59:31 +03001642 struct drm_i915_private *dev_priv = data;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001643 u32 reg;
1644
David Weinehall36cdd012016-08-22 13:59:31 +03001645 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
Rodrigo Vivida46f932014-08-01 02:04:45 -07001646 return -ENODEV;
1647
Paulo Zanoni25ad93f2015-07-02 19:25:10 -03001648 mutex_lock(&dev_priv->fbc.lock);
Rodrigo Vivida46f932014-08-01 02:04:45 -07001649
1650 reg = I915_READ(ILK_DPFC_CONTROL);
1651 dev_priv->fbc.false_color = val;
1652
1653 I915_WRITE(ILK_DPFC_CONTROL, val ?
1654 (reg | FBC_CTL_FALSE_COLOR) :
1655 (reg & ~FBC_CTL_FALSE_COLOR));
1656
Paulo Zanoni25ad93f2015-07-02 19:25:10 -03001657 mutex_unlock(&dev_priv->fbc.lock);
Rodrigo Vivida46f932014-08-01 02:04:45 -07001658 return 0;
1659}
1660
Ville Syrjälä4127dc42017-06-06 15:44:12 +03001661DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
1662 i915_fbc_false_color_get, i915_fbc_false_color_set,
Rodrigo Vivida46f932014-08-01 02:04:45 -07001663 "%llu\n");
1664
Paulo Zanoni92d44622013-05-31 16:33:24 -03001665static int i915_ips_status(struct seq_file *m, void *unused)
1666{
David Weinehall36cdd012016-08-22 13:59:31 +03001667 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00001668 intel_wakeref_t wakeref;
Paulo Zanoni92d44622013-05-31 16:33:24 -03001669
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00001670 if (!HAS_IPS(dev_priv))
1671 return -ENODEV;
Paulo Zanoni92d44622013-05-31 16:33:24 -03001672
Chris Wilsona0371212019-01-14 14:21:14 +00001673 wakeref = intel_runtime_pm_get(dev_priv);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001674
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001675 seq_printf(m, "Enabled by kernel parameter: %s\n",
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00001676 yesno(i915_modparams.enable_ips));
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001677
David Weinehall36cdd012016-08-22 13:59:31 +03001678 if (INTEL_GEN(dev_priv) >= 8) {
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001679 seq_puts(m, "Currently: unknown\n");
1680 } else {
1681 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1682 seq_puts(m, "Currently: enabled\n");
1683 else
1684 seq_puts(m, "Currently: disabled\n");
1685 }
Paulo Zanoni92d44622013-05-31 16:33:24 -03001686
Chris Wilsona0371212019-01-14 14:21:14 +00001687 intel_runtime_pm_put(dev_priv, wakeref);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001688
Paulo Zanoni92d44622013-05-31 16:33:24 -03001689 return 0;
1690}
1691
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001692static int i915_sr_status(struct seq_file *m, void *unused)
1693{
David Weinehall36cdd012016-08-22 13:59:31 +03001694 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00001695 intel_wakeref_t wakeref;
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001696 bool sr_enabled = false;
1697
Chris Wilson0e6e0be2019-01-14 14:21:24 +00001698 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001699
Chris Wilson7342a722017-03-09 14:20:49 +00001700 if (INTEL_GEN(dev_priv) >= 9)
1701 /* no global SR status; inspect per-plane WM */;
1702 else if (HAS_PCH_SPLIT(dev_priv))
Chris Wilson5ba2aaa2010-08-19 18:04:08 +01001703 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
Jani Nikulac0f86832016-12-07 12:13:04 +02001704 else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
David Weinehall36cdd012016-08-22 13:59:31 +03001705 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001706 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
David Weinehall36cdd012016-08-22 13:59:31 +03001707 else if (IS_I915GM(dev_priv))
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001708 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
David Weinehall36cdd012016-08-22 13:59:31 +03001709 else if (IS_PINEVIEW(dev_priv))
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001710 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
David Weinehall36cdd012016-08-22 13:59:31 +03001711 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
Ander Conselvan de Oliveira77b64552015-06-02 14:17:47 +03001712 sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001713
Chris Wilson0e6e0be2019-01-14 14:21:24 +00001714 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001715
Tvrtko Ursulin08c4d7f2016-11-17 12:30:14 +00001716 seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001717
1718 return 0;
1719}
1720
Jesse Barnes7648fa92010-05-20 14:28:11 -07001721static int i915_emon_status(struct seq_file *m, void *unused)
1722{
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001723 struct drm_i915_private *i915 = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00001724 intel_wakeref_t wakeref;
Chris Wilsonde227ef2010-07-03 07:58:38 +01001725
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001726 if (!IS_GEN(i915, 5))
Chris Wilson582be6b2012-04-30 19:35:02 +01001727 return -ENODEV;
1728
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001729 with_intel_runtime_pm(i915, wakeref) {
1730 unsigned long temp, chipset, gfx;
Jesse Barnes7648fa92010-05-20 14:28:11 -07001731
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001732 temp = i915_mch_val(i915);
1733 chipset = i915_chipset_val(i915);
1734 gfx = i915_gfx_val(i915);
Chris Wilsona0371212019-01-14 14:21:14 +00001735
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001736 seq_printf(m, "GMCH temp: %ld\n", temp);
1737 seq_printf(m, "Chipset power: %ld\n", chipset);
1738 seq_printf(m, "GFX power: %ld\n", gfx);
1739 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1740 }
Jesse Barnes7648fa92010-05-20 14:28:11 -07001741
1742 return 0;
1743}
1744
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001745static int i915_ring_freq_table(struct seq_file *m, void *unused)
1746{
David Weinehall36cdd012016-08-22 13:59:31 +03001747 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001748 struct intel_rps *rps = &dev_priv->gt_pm.rps;
Akash Goelf936ec32015-06-29 14:50:22 +05301749 unsigned int max_gpu_freq, min_gpu_freq;
Chris Wilsona0371212019-01-14 14:21:14 +00001750 intel_wakeref_t wakeref;
Chris Wilsond586b5f2018-03-08 14:26:48 +00001751 int gpu_freq, ia_freq;
1752 int ret;
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001753
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00001754 if (!HAS_LLC(dev_priv))
1755 return -ENODEV;
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001756
Chris Wilsona0371212019-01-14 14:21:14 +00001757 wakeref = intel_runtime_pm_get(dev_priv);
Paulo Zanoni5bfa0192013-12-19 11:54:52 -02001758
Sagar Arun Kamble9f817502017-10-10 22:30:05 +01001759 ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001760 if (ret)
Paulo Zanoni5bfa0192013-12-19 11:54:52 -02001761 goto out;
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001762
Chris Wilsond586b5f2018-03-08 14:26:48 +00001763 min_gpu_freq = rps->min_freq;
1764 max_gpu_freq = rps->max_freq;
Oscar Mateo2b2874e2018-04-05 17:00:52 +03001765 if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
Akash Goelf936ec32015-06-29 14:50:22 +05301766 /* Convert GT frequency to 50 HZ units */
Chris Wilsond586b5f2018-03-08 14:26:48 +00001767 min_gpu_freq /= GEN9_FREQ_SCALER;
1768 max_gpu_freq /= GEN9_FREQ_SCALER;
Akash Goelf936ec32015-06-29 14:50:22 +05301769 }
1770
Damien Lespiau267f0c92013-06-24 22:59:48 +01001771 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001772
Akash Goelf936ec32015-06-29 14:50:22 +05301773 for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
Ben Widawsky42c05262012-09-26 10:34:00 -07001774 ia_freq = gpu_freq;
1775 sandybridge_pcode_read(dev_priv,
1776 GEN6_PCODE_READ_MIN_FREQ_TABLE,
1777 &ia_freq);
Chris Wilson3ebecd02013-04-12 19:10:13 +01001778 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
Akash Goelf936ec32015-06-29 14:50:22 +05301779 intel_gpu_freq(dev_priv, (gpu_freq *
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001780 (IS_GEN9_BC(dev_priv) ||
Oscar Mateo2b2874e2018-04-05 17:00:52 +03001781 INTEL_GEN(dev_priv) >= 10 ?
Rodrigo Vivib976dc52017-01-23 10:32:37 -08001782 GEN9_FREQ_SCALER : 1))),
Chris Wilson3ebecd02013-04-12 19:10:13 +01001783 ((ia_freq >> 0) & 0xff) * 100,
1784 ((ia_freq >> 8) & 0xff) * 100);
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001785 }
1786
Sagar Arun Kamble9f817502017-10-10 22:30:05 +01001787 mutex_unlock(&dev_priv->pcu_lock);
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001788
Paulo Zanoni5bfa0192013-12-19 11:54:52 -02001789out:
Chris Wilsona0371212019-01-14 14:21:14 +00001790 intel_runtime_pm_put(dev_priv, wakeref);
Paulo Zanoni5bfa0192013-12-19 11:54:52 -02001791 return ret;
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001792}
1793
Chris Wilson44834a62010-08-19 16:09:23 +01001794static int i915_opregion(struct seq_file *m, void *unused)
1795{
David Weinehall36cdd012016-08-22 13:59:31 +03001796 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1797 struct drm_device *dev = &dev_priv->drm;
Chris Wilson44834a62010-08-19 16:09:23 +01001798 struct intel_opregion *opregion = &dev_priv->opregion;
1799 int ret;
1800
1801 ret = mutex_lock_interruptible(&dev->struct_mutex);
1802 if (ret)
Daniel Vetter0d38f002012-04-21 22:49:10 +02001803 goto out;
Chris Wilson44834a62010-08-19 16:09:23 +01001804
Jani Nikula2455a8e2015-12-14 12:50:53 +02001805 if (opregion->header)
1806 seq_write(m, opregion->header, OPREGION_SIZE);
Chris Wilson44834a62010-08-19 16:09:23 +01001807
1808 mutex_unlock(&dev->struct_mutex);
1809
Daniel Vetter0d38f002012-04-21 22:49:10 +02001810out:
Chris Wilson44834a62010-08-19 16:09:23 +01001811 return 0;
1812}
1813
Jani Nikulaada8f952015-12-15 13:17:12 +02001814static int i915_vbt(struct seq_file *m, void *unused)
1815{
David Weinehall36cdd012016-08-22 13:59:31 +03001816 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
Jani Nikulaada8f952015-12-15 13:17:12 +02001817
1818 if (opregion->vbt)
1819 seq_write(m, opregion->vbt, opregion->vbt_size);
1820
1821 return 0;
1822}
1823
Chris Wilson37811fc2010-08-25 22:45:57 +01001824static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1825{
David Weinehall36cdd012016-08-22 13:59:31 +03001826 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1827 struct drm_device *dev = &dev_priv->drm;
Namrta Salonieb13b8402015-11-27 13:43:11 +05301828 struct intel_framebuffer *fbdev_fb = NULL;
Daniel Vetter3a58ee12015-07-10 19:02:51 +02001829 struct drm_framebuffer *drm_fb;
Chris Wilson188c1ab2016-04-03 14:14:20 +01001830 int ret;
1831
1832 ret = mutex_lock_interruptible(&dev->struct_mutex);
1833 if (ret)
1834 return ret;
Chris Wilson37811fc2010-08-25 22:45:57 +01001835
Daniel Vetter06957262015-08-10 13:34:08 +02001836#ifdef CONFIG_DRM_FBDEV_EMULATION
Daniel Vetter346fb4e2017-07-06 15:00:20 +02001837 if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
David Weinehall36cdd012016-08-22 13:59:31 +03001838 fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
Chris Wilson37811fc2010-08-25 22:45:57 +01001839
Chris Wilson25bcce92016-07-02 15:36:00 +01001840 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1841 fbdev_fb->base.width,
1842 fbdev_fb->base.height,
Ville Syrjäläb00c6002016-12-14 23:31:35 +02001843 fbdev_fb->base.format->depth,
Ville Syrjälä272725c2016-12-14 23:32:20 +02001844 fbdev_fb->base.format->cpp[0] * 8,
Ville Syrjäläbae781b2016-11-16 13:33:16 +02001845 fbdev_fb->base.modifier,
Chris Wilson25bcce92016-07-02 15:36:00 +01001846 drm_framebuffer_read_refcount(&fbdev_fb->base));
Daniel Stonea5ff7a42018-05-18 15:30:07 +01001847 describe_obj(m, intel_fb_obj(&fbdev_fb->base));
Chris Wilson25bcce92016-07-02 15:36:00 +01001848 seq_putc(m, '\n');
1849 }
Daniel Vetter4520f532013-10-09 09:18:51 +02001850#endif
Chris Wilson37811fc2010-08-25 22:45:57 +01001851
Daniel Vetter4b096ac2012-12-10 21:19:18 +01001852 mutex_lock(&dev->mode_config.fb_lock);
Daniel Vetter3a58ee12015-07-10 19:02:51 +02001853 drm_for_each_fb(drm_fb, dev) {
Namrta Salonieb13b8402015-11-27 13:43:11 +05301854 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1855 if (fb == fbdev_fb)
Chris Wilson37811fc2010-08-25 22:45:57 +01001856 continue;
1857
Tvrtko Ursulinc1ca506d2015-02-10 17:16:07 +00001858 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
Chris Wilson37811fc2010-08-25 22:45:57 +01001859 fb->base.width,
1860 fb->base.height,
Ville Syrjäläb00c6002016-12-14 23:31:35 +02001861 fb->base.format->depth,
Ville Syrjälä272725c2016-12-14 23:32:20 +02001862 fb->base.format->cpp[0] * 8,
Ville Syrjäläbae781b2016-11-16 13:33:16 +02001863 fb->base.modifier,
Dave Airlie747a5982016-04-15 15:10:35 +10001864 drm_framebuffer_read_refcount(&fb->base));
Daniel Stonea5ff7a42018-05-18 15:30:07 +01001865 describe_obj(m, intel_fb_obj(&fb->base));
Damien Lespiau267f0c92013-06-24 22:59:48 +01001866 seq_putc(m, '\n');
Chris Wilson37811fc2010-08-25 22:45:57 +01001867 }
Daniel Vetter4b096ac2012-12-10 21:19:18 +01001868 mutex_unlock(&dev->mode_config.fb_lock);
Chris Wilson188c1ab2016-04-03 14:14:20 +01001869 mutex_unlock(&dev->struct_mutex);
Chris Wilson37811fc2010-08-25 22:45:57 +01001870
1871 return 0;
1872}
1873
Chris Wilson7e37f882016-08-02 22:50:21 +01001874static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001875{
Chris Wilsonef5032a2018-03-07 13:42:24 +00001876 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1877 ring->space, ring->head, ring->tail, ring->emit);
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001878}
1879
Ben Widawskye76d3632011-03-19 18:14:29 -07001880static int i915_context_status(struct seq_file *m, void *unused)
1881{
David Weinehall36cdd012016-08-22 13:59:31 +03001882 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1883 struct drm_device *dev = &dev_priv->drm;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001884 struct intel_engine_cs *engine;
Chris Wilsone2efd132016-05-24 14:53:34 +01001885 struct i915_gem_context *ctx;
Akash Goel3b3f1652016-10-13 22:44:48 +05301886 enum intel_engine_id id;
Dave Gordonc3232b12016-03-23 18:19:53 +00001887 int ret;
Ben Widawskye76d3632011-03-19 18:14:29 -07001888
Daniel Vetterf3d28872014-05-29 23:23:08 +02001889 ret = mutex_lock_interruptible(&dev->struct_mutex);
Ben Widawskye76d3632011-03-19 18:14:29 -07001890 if (ret)
1891 return ret;
1892
Chris Wilson829a0af2017-06-20 12:05:45 +01001893 list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
Chris Wilson288f1ce2018-09-04 16:31:17 +01001894 seq_puts(m, "HW context ");
1895 if (!list_empty(&ctx->hw_id_link))
1896 seq_printf(m, "%x [pin %u]", ctx->hw_id,
1897 atomic_read(&ctx->hw_id_pin_count));
Chris Wilsonc84455b2016-08-15 10:49:08 +01001898 if (ctx->pid) {
Chris Wilsond28b99a2016-05-24 14:53:39 +01001899 struct task_struct *task;
1900
Chris Wilsonc84455b2016-08-15 10:49:08 +01001901 task = get_pid_task(ctx->pid, PIDTYPE_PID);
Chris Wilsond28b99a2016-05-24 14:53:39 +01001902 if (task) {
1903 seq_printf(m, "(%s [%d]) ",
1904 task->comm, task->pid);
1905 put_task_struct(task);
1906 }
Chris Wilsonc84455b2016-08-15 10:49:08 +01001907 } else if (IS_ERR(ctx->file_priv)) {
1908 seq_puts(m, "(deleted) ");
Chris Wilsond28b99a2016-05-24 14:53:39 +01001909 } else {
1910 seq_puts(m, "(kernel) ");
1911 }
1912
Chris Wilsonbca44d82016-05-24 14:53:41 +01001913 seq_putc(m, ctx->remap_slice ? 'R' : 'r');
1914 seq_putc(m, '\n');
Ben Widawskya33afea2013-09-17 21:12:45 -07001915
Akash Goel3b3f1652016-10-13 22:44:48 +05301916 for_each_engine(engine, dev_priv, id) {
Chris Wilsonab82a062018-04-30 14:15:01 +01001917 struct intel_context *ce =
1918 to_intel_context(ctx, engine);
Chris Wilsonbca44d82016-05-24 14:53:41 +01001919
1920 seq_printf(m, "%s: ", engine->name);
Chris Wilsonbca44d82016-05-24 14:53:41 +01001921 if (ce->state)
Chris Wilsonbf3783e2016-08-15 10:48:54 +01001922 describe_obj(m, ce->state->obj);
Chris Wilsondca33ec2016-08-02 22:50:20 +01001923 if (ce->ring)
Chris Wilson7e37f882016-08-02 22:50:21 +01001924 describe_ctx_ring(m, ce->ring);
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001925 seq_putc(m, '\n');
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001926 }
1927
Ben Widawskya33afea2013-09-17 21:12:45 -07001928 seq_putc(m, '\n');
Ben Widawskya168c292013-02-14 15:05:12 -08001929 }
1930
Daniel Vetterf3d28872014-05-29 23:23:08 +02001931 mutex_unlock(&dev->struct_mutex);
Ben Widawskye76d3632011-03-19 18:14:29 -07001932
1933 return 0;
1934}
1935
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001936static const char *swizzle_string(unsigned swizzle)
1937{
Damien Lespiauaee56cf2013-06-24 22:59:49 +01001938 switch (swizzle) {
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001939 case I915_BIT_6_SWIZZLE_NONE:
1940 return "none";
1941 case I915_BIT_6_SWIZZLE_9:
1942 return "bit9";
1943 case I915_BIT_6_SWIZZLE_9_10:
1944 return "bit9/bit10";
1945 case I915_BIT_6_SWIZZLE_9_11:
1946 return "bit9/bit11";
1947 case I915_BIT_6_SWIZZLE_9_10_11:
1948 return "bit9/bit10/bit11";
1949 case I915_BIT_6_SWIZZLE_9_17:
1950 return "bit9/bit17";
1951 case I915_BIT_6_SWIZZLE_9_10_17:
1952 return "bit9/bit10/bit17";
1953 case I915_BIT_6_SWIZZLE_UNKNOWN:
Masanari Iida8a168ca2012-12-29 02:00:09 +09001954 return "unknown";
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001955 }
1956
1957 return "bug";
1958}
1959
1960static int i915_swizzle_info(struct seq_file *m, void *data)
1961{
David Weinehall36cdd012016-08-22 13:59:31 +03001962 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00001963 intel_wakeref_t wakeref;
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001964
Chris Wilsona0371212019-01-14 14:21:14 +00001965 wakeref = intel_runtime_pm_get(dev_priv);
Daniel Vetter22bcfc62012-08-09 15:07:02 +02001966
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001967 seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
1968 swizzle_string(dev_priv->mm.bit_6_swizzle_x));
1969 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
1970 swizzle_string(dev_priv->mm.bit_6_swizzle_y));
1971
Lucas De Marchif3ce44a2018-12-12 10:10:44 -08001972 if (IS_GEN_RANGE(dev_priv, 3, 4)) {
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001973 seq_printf(m, "DDC = 0x%08x\n",
1974 I915_READ(DCC));
Daniel Vetter656bfa32014-11-20 09:26:30 +01001975 seq_printf(m, "DDC2 = 0x%08x\n",
1976 I915_READ(DCC2));
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001977 seq_printf(m, "C0DRB3 = 0x%04x\n",
1978 I915_READ16(C0DRB3));
1979 seq_printf(m, "C1DRB3 = 0x%04x\n",
1980 I915_READ16(C1DRB3));
David Weinehall36cdd012016-08-22 13:59:31 +03001981 } else if (INTEL_GEN(dev_priv) >= 6) {
Daniel Vetter3fa7d232012-01-31 16:47:56 +01001982 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
1983 I915_READ(MAD_DIMM_C0));
1984 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
1985 I915_READ(MAD_DIMM_C1));
1986 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
1987 I915_READ(MAD_DIMM_C2));
1988 seq_printf(m, "TILECTL = 0x%08x\n",
1989 I915_READ(TILECTL));
David Weinehall36cdd012016-08-22 13:59:31 +03001990 if (INTEL_GEN(dev_priv) >= 8)
Ben Widawsky9d3203e2013-11-02 21:07:14 -07001991 seq_printf(m, "GAMTARBMODE = 0x%08x\n",
1992 I915_READ(GAMTARBMODE));
1993 else
1994 seq_printf(m, "ARB_MODE = 0x%08x\n",
1995 I915_READ(ARB_MODE));
Daniel Vetter3fa7d232012-01-31 16:47:56 +01001996 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
1997 I915_READ(DISP_ARB_CTL));
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001998 }
Daniel Vetter656bfa32014-11-20 09:26:30 +01001999
2000 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2001 seq_puts(m, "L-shaped memory detected\n");
2002
Chris Wilsona0371212019-01-14 14:21:14 +00002003 intel_runtime_pm_put(dev_priv, wakeref);
Daniel Vetterea16a3c2011-12-14 13:57:16 +01002004
2005 return 0;
2006}
2007
Chris Wilson7466c292016-08-15 09:49:33 +01002008static const char *rps_power_to_str(unsigned int power)
2009{
2010 static const char * const strings[] = {
2011 [LOW_POWER] = "low power",
2012 [BETWEEN] = "mixed",
2013 [HIGH_POWER] = "high power",
2014 };
2015
2016 if (power >= ARRAY_SIZE(strings) || !strings[power])
2017 return "unknown";
2018
2019 return strings[power];
2020}
2021
Chris Wilson1854d5c2015-04-07 16:20:32 +01002022static int i915_rps_boost_info(struct seq_file *m, void *data)
2023{
David Weinehall36cdd012016-08-22 13:59:31 +03002024 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2025 struct drm_device *dev = &dev_priv->drm;
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002026 struct intel_rps *rps = &dev_priv->gt_pm.rps;
Chris Wilsonc0a6aa72018-10-02 12:32:21 +01002027 u32 act_freq = rps->cur_freq;
Chris Wilsona0371212019-01-14 14:21:14 +00002028 intel_wakeref_t wakeref;
Chris Wilson1854d5c2015-04-07 16:20:32 +01002029 struct drm_file *file;
Chris Wilson1854d5c2015-04-07 16:20:32 +01002030
Chris Wilsond4225a52019-01-14 14:21:23 +00002031 with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
Chris Wilsonc0a6aa72018-10-02 12:32:21 +01002032 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
2033 mutex_lock(&dev_priv->pcu_lock);
2034 act_freq = vlv_punit_read(dev_priv,
2035 PUNIT_REG_GPU_FREQ_STS);
2036 act_freq = (act_freq >> 8) & 0xff;
2037 mutex_unlock(&dev_priv->pcu_lock);
2038 } else {
2039 act_freq = intel_get_cagf(dev_priv,
2040 I915_READ(GEN6_RPSTAT1));
2041 }
Chris Wilsonc0a6aa72018-10-02 12:32:21 +01002042 }
2043
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002044 seq_printf(m, "RPS enabled? %d\n", rps->enabled);
Chris Wilson28176ef2016-10-28 13:58:56 +01002045 seq_printf(m, "GPU busy? %s [%d requests]\n",
2046 yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
Chris Wilson7b92c1b2017-06-28 13:35:48 +01002047 seq_printf(m, "Boosts outstanding? %d\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002048 atomic_read(&rps->num_waiters));
Chris Wilson60548c52018-07-31 14:26:29 +01002049 seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
Chris Wilsonc0a6aa72018-10-02 12:32:21 +01002050 seq_printf(m, "Frequency requested %d, actual %d\n",
2051 intel_gpu_freq(dev_priv, rps->cur_freq),
2052 intel_gpu_freq(dev_priv, act_freq));
Chris Wilson7466c292016-08-15 09:49:33 +01002053 seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002054 intel_gpu_freq(dev_priv, rps->min_freq),
2055 intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
2056 intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
2057 intel_gpu_freq(dev_priv, rps->max_freq));
Chris Wilson7466c292016-08-15 09:49:33 +01002058 seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002059 intel_gpu_freq(dev_priv, rps->idle_freq),
2060 intel_gpu_freq(dev_priv, rps->efficient_freq),
2061 intel_gpu_freq(dev_priv, rps->boost_freq));
Daniel Vetter1d2ac402016-04-26 19:29:41 +02002062
2063 mutex_lock(&dev->filelist_mutex);
Chris Wilson1854d5c2015-04-07 16:20:32 +01002064 list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2065 struct drm_i915_file_private *file_priv = file->driver_priv;
2066 struct task_struct *task;
2067
2068 rcu_read_lock();
2069 task = pid_task(file->pid, PIDTYPE_PID);
Chris Wilson7b92c1b2017-06-28 13:35:48 +01002070 seq_printf(m, "%s [%d]: %d boosts\n",
Chris Wilson1854d5c2015-04-07 16:20:32 +01002071 task ? task->comm : "<unknown>",
2072 task ? task->pid : -1,
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002073 atomic_read(&file_priv->rps_client.boosts));
Chris Wilson1854d5c2015-04-07 16:20:32 +01002074 rcu_read_unlock();
2075 }
Chris Wilson7b92c1b2017-06-28 13:35:48 +01002076 seq_printf(m, "Kernel (anonymous) boosts: %d\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002077 atomic_read(&rps->boosts));
Daniel Vetter1d2ac402016-04-26 19:29:41 +02002078 mutex_unlock(&dev->filelist_mutex);
Chris Wilson1854d5c2015-04-07 16:20:32 +01002079
Chris Wilson7466c292016-08-15 09:49:33 +01002080 if (INTEL_GEN(dev_priv) >= 6 &&
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002081 rps->enabled &&
Chris Wilson28176ef2016-10-28 13:58:56 +01002082 dev_priv->gt.active_requests) {
Chris Wilson7466c292016-08-15 09:49:33 +01002083 u32 rpup, rpupei;
2084 u32 rpdown, rpdownei;
2085
2086 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2087 rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
2088 rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
2089 rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
2090 rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
2091 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2092
2093 seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
Chris Wilson60548c52018-07-31 14:26:29 +01002094 rps_power_to_str(rps->power.mode));
Chris Wilson7466c292016-08-15 09:49:33 +01002095 seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
Chris Wilson23f4a282017-02-18 11:27:08 +00002096 rpup && rpupei ? 100 * rpup / rpupei : 0,
Chris Wilson60548c52018-07-31 14:26:29 +01002097 rps->power.up_threshold);
Chris Wilson7466c292016-08-15 09:49:33 +01002098 seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
Chris Wilson23f4a282017-02-18 11:27:08 +00002099 rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
Chris Wilson60548c52018-07-31 14:26:29 +01002100 rps->power.down_threshold);
Chris Wilson7466c292016-08-15 09:49:33 +01002101 } else {
2102 seq_puts(m, "\nRPS Autotuning inactive\n");
2103 }
2104
Chris Wilson8d3afd72015-05-21 21:01:47 +01002105 return 0;
Chris Wilson1854d5c2015-04-07 16:20:32 +01002106}
2107
Ben Widawsky63573eb2013-07-04 11:02:07 -07002108static int i915_llc(struct seq_file *m, void *data)
2109{
David Weinehall36cdd012016-08-22 13:59:31 +03002110 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Mika Kuoppala3accaf72016-04-13 17:26:43 +03002111 const bool edram = INTEL_GEN(dev_priv) > 8;
Ben Widawsky63573eb2013-07-04 11:02:07 -07002112
David Weinehall36cdd012016-08-22 13:59:31 +03002113 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
Mika Kuoppala3accaf72016-04-13 17:26:43 +03002114 seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2115 intel_uncore_edram_size(dev_priv)/1024/1024);
Ben Widawsky63573eb2013-07-04 11:02:07 -07002116
2117 return 0;
2118}
2119
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002120static int i915_huc_load_status_info(struct seq_file *m, void *data)
2121{
2122 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00002123 intel_wakeref_t wakeref;
Michal Wajdeczko56ffc742017-10-17 09:44:49 +00002124 struct drm_printer p;
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002125
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002126 if (!HAS_HUC(dev_priv))
2127 return -ENODEV;
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002128
Michal Wajdeczko56ffc742017-10-17 09:44:49 +00002129 p = drm_seq_file_printer(m);
2130 intel_uc_fw_dump(&dev_priv->huc.fw, &p);
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002131
Chris Wilsond4225a52019-01-14 14:21:23 +00002132 with_intel_runtime_pm(dev_priv, wakeref)
2133 seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002134
2135 return 0;
2136}
2137
Alex Daifdf5d352015-08-12 15:43:37 +01002138static int i915_guc_load_status_info(struct seq_file *m, void *data)
2139{
David Weinehall36cdd012016-08-22 13:59:31 +03002140 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00002141 intel_wakeref_t wakeref;
Michal Wajdeczko56ffc742017-10-17 09:44:49 +00002142 struct drm_printer p;
Alex Daifdf5d352015-08-12 15:43:37 +01002143
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002144 if (!HAS_GUC(dev_priv))
2145 return -ENODEV;
Alex Daifdf5d352015-08-12 15:43:37 +01002146
Michal Wajdeczko56ffc742017-10-17 09:44:49 +00002147 p = drm_seq_file_printer(m);
2148 intel_uc_fw_dump(&dev_priv->guc.fw, &p);
Alex Daifdf5d352015-08-12 15:43:37 +01002149
Chris Wilsond4225a52019-01-14 14:21:23 +00002150 with_intel_runtime_pm(dev_priv, wakeref) {
2151 u32 tmp = I915_READ(GUC_STATUS);
2152 u32 i;
sagar.a.kamble@intel.com3582ad12017-02-03 13:58:33 +05302153
Chris Wilsond4225a52019-01-14 14:21:23 +00002154 seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2155 seq_printf(m, "\tBootrom status = 0x%x\n",
2156 (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2157 seq_printf(m, "\tuKernel status = 0x%x\n",
2158 (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2159 seq_printf(m, "\tMIA Core status = 0x%x\n",
2160 (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2161 seq_puts(m, "\nScratch registers:\n");
2162 for (i = 0; i < 16; i++) {
2163 seq_printf(m, "\t%2d: \t0x%x\n",
2164 i, I915_READ(SOFT_SCRATCH(i)));
2165 }
2166 }
sagar.a.kamble@intel.com3582ad12017-02-03 13:58:33 +05302167
Alex Daifdf5d352015-08-12 15:43:37 +01002168 return 0;
2169}
2170
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01002171static const char *
2172stringify_guc_log_type(enum guc_log_buffer_type type)
2173{
2174 switch (type) {
2175 case GUC_ISR_LOG_BUFFER:
2176 return "ISR";
2177 case GUC_DPC_LOG_BUFFER:
2178 return "DPC";
2179 case GUC_CRASH_DUMP_LOG_BUFFER:
2180 return "CRASH";
2181 default:
2182 MISSING_CASE(type);
2183 }
2184
2185 return "";
2186}
2187
Akash Goel5aa1ee42016-10-12 21:54:36 +05302188static void i915_guc_log_info(struct seq_file *m,
2189 struct drm_i915_private *dev_priv)
2190{
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01002191 struct intel_guc_log *log = &dev_priv->guc.log;
2192 enum guc_log_buffer_type type;
2193
2194 if (!intel_guc_log_relay_enabled(log)) {
2195 seq_puts(m, "GuC log relay disabled\n");
2196 return;
2197 }
Akash Goel5aa1ee42016-10-12 21:54:36 +05302198
Michał Winiarskidb557992018-03-19 10:53:43 +01002199 seq_puts(m, "GuC logging stats:\n");
Akash Goel5aa1ee42016-10-12 21:54:36 +05302200
Michał Winiarski6a96be22018-03-19 10:53:42 +01002201 seq_printf(m, "\tRelay full count: %u\n",
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01002202 log->relay.full_count);
2203
2204 for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
2205 seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
2206 stringify_guc_log_type(type),
2207 log->stats[type].flush,
2208 log->stats[type].sampled_overflow);
2209 }
Akash Goel5aa1ee42016-10-12 21:54:36 +05302210}
2211
Dave Gordon8b417c22015-08-12 15:43:44 +01002212static void i915_guc_client_info(struct seq_file *m,
2213 struct drm_i915_private *dev_priv,
Sagar Arun Kamble5afc8b42017-11-16 19:02:40 +05302214 struct intel_guc_client *client)
Dave Gordon8b417c22015-08-12 15:43:44 +01002215{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002216 struct intel_engine_cs *engine;
Dave Gordonc18468c2016-08-09 15:19:22 +01002217 enum intel_engine_id id;
Jani Nikulae5315212019-01-16 11:15:23 +02002218 u64 tot = 0;
Dave Gordon8b417c22015-08-12 15:43:44 +01002219
Oscar Mateob09935a2017-03-22 10:39:53 -07002220 seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2221 client->priority, client->stage_id, client->proc_desc_offset);
Michał Winiarski59db36c2017-09-14 12:51:23 +02002222 seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2223 client->doorbell_id, client->doorbell_offset);
Dave Gordon8b417c22015-08-12 15:43:44 +01002224
Akash Goel3b3f1652016-10-13 22:44:48 +05302225 for_each_engine(engine, dev_priv, id) {
Dave Gordonc18468c2016-08-09 15:19:22 +01002226 u64 submissions = client->submissions[id];
2227 tot += submissions;
Dave Gordon8b417c22015-08-12 15:43:44 +01002228 seq_printf(m, "\tSubmissions: %llu %s\n",
Dave Gordonc18468c2016-08-09 15:19:22 +01002229 submissions, engine->name);
Dave Gordon8b417c22015-08-12 15:43:44 +01002230 }
2231 seq_printf(m, "\tTotal: %llu\n", tot);
2232}
2233
2234static int i915_guc_info(struct seq_file *m, void *data)
2235{
David Weinehall36cdd012016-08-22 13:59:31 +03002236 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilson334636c2016-11-29 12:10:20 +00002237 const struct intel_guc *guc = &dev_priv->guc;
Dave Gordon8b417c22015-08-12 15:43:44 +01002238
Michał Winiarskidb557992018-03-19 10:53:43 +01002239 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002240 return -ENODEV;
2241
Michał Winiarskidb557992018-03-19 10:53:43 +01002242 i915_guc_log_info(m, dev_priv);
2243
2244 if (!USES_GUC_SUBMISSION(dev_priv))
2245 return 0;
2246
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002247 GEM_BUG_ON(!guc->execbuf_client);
Dave Gordon8b417c22015-08-12 15:43:44 +01002248
Michał Winiarskidb557992018-03-19 10:53:43 +01002249 seq_printf(m, "\nDoorbell map:\n");
Joonas Lahtinenabddffd2017-03-22 10:39:44 -07002250 seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
Michał Winiarskidb557992018-03-19 10:53:43 +01002251 seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
Dave Gordon9636f6d2016-06-13 17:57:28 +01002252
Chris Wilson334636c2016-11-29 12:10:20 +00002253 seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2254 i915_guc_client_info(m, dev_priv, guc->execbuf_client);
Chris Wilsone78c9172018-02-07 21:05:42 +00002255 if (guc->preempt_client) {
2256 seq_printf(m, "\nGuC preempt client @ %p:\n",
2257 guc->preempt_client);
2258 i915_guc_client_info(m, dev_priv, guc->preempt_client);
2259 }
Dave Gordon8b417c22015-08-12 15:43:44 +01002260
2261 /* Add more as required ... */
2262
2263 return 0;
2264}
2265
Oscar Mateoa8b93702017-05-10 15:04:51 +00002266static int i915_guc_stage_pool(struct seq_file *m, void *data)
Alex Dai4c7e77f2015-08-12 15:43:40 +01002267{
David Weinehall36cdd012016-08-22 13:59:31 +03002268 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Oscar Mateoa8b93702017-05-10 15:04:51 +00002269 const struct intel_guc *guc = &dev_priv->guc;
2270 struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
Sagar Arun Kamble5afc8b42017-11-16 19:02:40 +05302271 struct intel_guc_client *client = guc->execbuf_client;
Oscar Mateoa8b93702017-05-10 15:04:51 +00002272 unsigned int tmp;
2273 int index;
Alex Dai4c7e77f2015-08-12 15:43:40 +01002274
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002275 if (!USES_GUC_SUBMISSION(dev_priv))
2276 return -ENODEV;
Alex Dai4c7e77f2015-08-12 15:43:40 +01002277
Oscar Mateoa8b93702017-05-10 15:04:51 +00002278 for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
2279 struct intel_engine_cs *engine;
Alex Dai4c7e77f2015-08-12 15:43:40 +01002280
Oscar Mateoa8b93702017-05-10 15:04:51 +00002281 if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
2282 continue;
Alex Dai4c7e77f2015-08-12 15:43:40 +01002283
Oscar Mateoa8b93702017-05-10 15:04:51 +00002284 seq_printf(m, "GuC stage descriptor %u:\n", index);
2285 seq_printf(m, "\tIndex: %u\n", desc->stage_id);
2286 seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
2287 seq_printf(m, "\tPriority: %d\n", desc->priority);
2288 seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
2289 seq_printf(m, "\tEngines used: 0x%x\n",
2290 desc->engines_used);
2291 seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
2292 desc->db_trigger_phy,
2293 desc->db_trigger_cpu,
2294 desc->db_trigger_uk);
2295 seq_printf(m, "\tProcess descriptor: 0x%x\n",
2296 desc->process_desc);
Colin Ian King9a094852017-05-16 10:22:35 +01002297 seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
Oscar Mateoa8b93702017-05-10 15:04:51 +00002298 desc->wq_addr, desc->wq_size);
2299 seq_putc(m, '\n');
2300
2301 for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
2302 u32 guc_engine_id = engine->guc_id;
2303 struct guc_execlist_context *lrc =
2304 &desc->lrc[guc_engine_id];
2305
2306 seq_printf(m, "\t%s LRC:\n", engine->name);
2307 seq_printf(m, "\t\tContext desc: 0x%x\n",
2308 lrc->context_desc);
2309 seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
2310 seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
2311 seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
2312 seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
2313 seq_putc(m, '\n');
2314 }
Alex Dai4c7e77f2015-08-12 15:43:40 +01002315 }
2316
Oscar Mateoa8b93702017-05-10 15:04:51 +00002317 return 0;
2318}
2319
Alex Dai4c7e77f2015-08-12 15:43:40 +01002320static int i915_guc_log_dump(struct seq_file *m, void *data)
2321{
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002322 struct drm_info_node *node = m->private;
2323 struct drm_i915_private *dev_priv = node_to_i915(node);
2324 bool dump_load_err = !!node->info_ent->data;
2325 struct drm_i915_gem_object *obj = NULL;
2326 u32 *log;
2327 int i = 0;
Alex Dai4c7e77f2015-08-12 15:43:40 +01002328
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002329 if (!HAS_GUC(dev_priv))
2330 return -ENODEV;
2331
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002332 if (dump_load_err)
2333 obj = dev_priv->guc.load_err_log;
2334 else if (dev_priv->guc.log.vma)
2335 obj = dev_priv->guc.log.vma->obj;
2336
2337 if (!obj)
Alex Dai4c7e77f2015-08-12 15:43:40 +01002338 return 0;
2339
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002340 log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2341 if (IS_ERR(log)) {
2342 DRM_DEBUG("Failed to pin object\n");
2343 seq_puts(m, "(log data unaccessible)\n");
2344 return PTR_ERR(log);
Alex Dai4c7e77f2015-08-12 15:43:40 +01002345 }
2346
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002347 for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2348 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2349 *(log + i), *(log + i + 1),
2350 *(log + i + 2), *(log + i + 3));
2351
Alex Dai4c7e77f2015-08-12 15:43:40 +01002352 seq_putc(m, '\n');
2353
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002354 i915_gem_object_unpin_map(obj);
2355
Alex Dai4c7e77f2015-08-12 15:43:40 +01002356 return 0;
2357}
2358
Michał Winiarski4977a282018-03-19 10:53:40 +01002359static int i915_guc_log_level_get(void *data, u64 *val)
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302360{
Chris Wilsonbcc36d82017-04-07 20:42:20 +01002361 struct drm_i915_private *dev_priv = data;
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302362
Michał Winiarski86aa8242018-03-08 16:46:53 +01002363 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002364 return -ENODEV;
2365
Piotr Piórkowski50935ac2018-06-04 16:19:41 +02002366 *val = intel_guc_log_get_level(&dev_priv->guc.log);
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302367
2368 return 0;
2369}
2370
Michał Winiarski4977a282018-03-19 10:53:40 +01002371static int i915_guc_log_level_set(void *data, u64 val)
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302372{
Chris Wilsonbcc36d82017-04-07 20:42:20 +01002373 struct drm_i915_private *dev_priv = data;
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302374
Michał Winiarski86aa8242018-03-08 16:46:53 +01002375 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002376 return -ENODEV;
2377
Piotr Piórkowski50935ac2018-06-04 16:19:41 +02002378 return intel_guc_log_set_level(&dev_priv->guc.log, val);
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302379}
2380
Michał Winiarski4977a282018-03-19 10:53:40 +01002381DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
2382 i915_guc_log_level_get, i915_guc_log_level_set,
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302383 "%lld\n");
2384
Michał Winiarski4977a282018-03-19 10:53:40 +01002385static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2386{
2387 struct drm_i915_private *dev_priv = inode->i_private;
2388
2389 if (!USES_GUC(dev_priv))
2390 return -ENODEV;
2391
2392 file->private_data = &dev_priv->guc.log;
2393
2394 return intel_guc_log_relay_open(&dev_priv->guc.log);
2395}
2396
2397static ssize_t
2398i915_guc_log_relay_write(struct file *filp,
2399 const char __user *ubuf,
2400 size_t cnt,
2401 loff_t *ppos)
2402{
2403 struct intel_guc_log *log = filp->private_data;
2404
2405 intel_guc_log_relay_flush(log);
2406
2407 return cnt;
2408}
2409
2410static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2411{
2412 struct drm_i915_private *dev_priv = inode->i_private;
2413
2414 intel_guc_log_relay_close(&dev_priv->guc.log);
2415
2416 return 0;
2417}
2418
2419static const struct file_operations i915_guc_log_relay_fops = {
2420 .owner = THIS_MODULE,
2421 .open = i915_guc_log_relay_open,
2422 .write = i915_guc_log_relay_write,
2423 .release = i915_guc_log_relay_release,
2424};
2425
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002426static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2427{
2428 u8 val;
2429 static const char * const sink_status[] = {
2430 "inactive",
2431 "transition to active, capture and display",
2432 "active, display from RFB",
2433 "active, capture and display on sink device timings",
2434 "transition to inactive, capture and display, timing re-sync",
2435 "reserved",
2436 "reserved",
2437 "sink internal error",
2438 };
2439 struct drm_connector *connector = m->private;
Rodrigo Vivi7a72c782018-07-19 17:31:55 -07002440 struct drm_i915_private *dev_priv = to_i915(connector->dev);
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002441 struct intel_dp *intel_dp =
2442 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
Rodrigo Vivi7a72c782018-07-19 17:31:55 -07002443 int ret;
2444
2445 if (!CAN_PSR(dev_priv)) {
2446 seq_puts(m, "PSR Unsupported\n");
2447 return -ENODEV;
2448 }
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002449
2450 if (connector->status != connector_status_connected)
2451 return -ENODEV;
2452
Rodrigo Vivi7a72c782018-07-19 17:31:55 -07002453 ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2454
2455 if (ret == 1) {
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002456 const char *str = "unknown";
2457
2458 val &= DP_PSR_SINK_STATE_MASK;
2459 if (val < ARRAY_SIZE(sink_status))
2460 str = sink_status[val];
2461 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2462 } else {
Rodrigo Vivi7a72c782018-07-19 17:31:55 -07002463 return ret;
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002464 }
2465
2466 return 0;
2467}
2468DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2469
Vathsala Nagaraju00b06292018-06-27 13:38:30 +05302470static void
2471psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
Chris Wilsonb86bef202017-01-16 13:06:21 +00002472{
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002473 u32 val, status_val;
2474 const char *status = "unknown";
Chris Wilsonb86bef202017-01-16 13:06:21 +00002475
Vathsala Nagaraju00b06292018-06-27 13:38:30 +05302476 if (dev_priv->psr.psr2_enabled) {
2477 static const char * const live_status[] = {
2478 "IDLE",
2479 "CAPTURE",
2480 "CAPTURE_FS",
2481 "SLEEP",
2482 "BUFON_FW",
2483 "ML_UP",
2484 "SU_STANDBY",
2485 "FAST_SLEEP",
2486 "DEEP_SLEEP",
2487 "BUF_ON",
2488 "TG_ON"
2489 };
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002490 val = I915_READ(EDP_PSR2_STATUS);
2491 status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
2492 EDP_PSR2_STATUS_STATE_SHIFT;
2493 if (status_val < ARRAY_SIZE(live_status))
2494 status = live_status[status_val];
Vathsala Nagaraju00b06292018-06-27 13:38:30 +05302495 } else {
2496 static const char * const live_status[] = {
2497 "IDLE",
2498 "SRDONACK",
2499 "SRDENT",
2500 "BUFOFF",
2501 "BUFON",
2502 "AUXACK",
2503 "SRDOFFACK",
2504 "SRDENT_ON",
2505 };
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002506 val = I915_READ(EDP_PSR_STATUS);
2507 status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
2508 EDP_PSR_STATUS_STATE_SHIFT;
2509 if (status_val < ARRAY_SIZE(live_status))
2510 status = live_status[status_val];
Vathsala Nagaraju00b06292018-06-27 13:38:30 +05302511 }
Chris Wilsonb86bef202017-01-16 13:06:21 +00002512
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002513 seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
Chris Wilsonb86bef202017-01-16 13:06:21 +00002514}
2515
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03002516static int i915_edp_psr_status(struct seq_file *m, void *data)
2517{
David Weinehall36cdd012016-08-22 13:59:31 +03002518 struct drm_i915_private *dev_priv = node_to_i915(m->private);
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002519 struct i915_psr *psr = &dev_priv->psr;
Chris Wilsona0371212019-01-14 14:21:14 +00002520 intel_wakeref_t wakeref;
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002521 const char *status;
2522 bool enabled;
2523 u32 val;
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03002524
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002525 if (!HAS_PSR(dev_priv))
2526 return -ENODEV;
Damien Lespiau3553a8e2015-03-09 14:17:58 +00002527
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002528 seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
2529 if (psr->dp)
2530 seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
2531 seq_puts(m, "\n");
2532
2533 if (!psr->sink_support)
Dhinakaran Pandiyanc9ef2912018-01-03 13:38:24 -08002534 return 0;
2535
Chris Wilsona0371212019-01-14 14:21:14 +00002536 wakeref = intel_runtime_pm_get(dev_priv);
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002537 mutex_lock(&psr->lock);
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02002538
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002539 if (psr->enabled)
2540 status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
Dhinakaran Pandiyance3508f2018-05-11 16:00:59 -07002541 else
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002542 status = "disabled";
2543 seq_printf(m, "PSR mode: %s\n", status);
Rodrigo Vivi60e5ffe2016-02-01 12:02:07 -08002544
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002545 if (!psr->enabled)
2546 goto unlock;
Rodrigo Vivi60e5ffe2016-02-01 12:02:07 -08002547
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002548 if (psr->psr2_enabled) {
2549 val = I915_READ(EDP_PSR2_CTL);
2550 enabled = val & EDP_PSR2_ENABLE;
2551 } else {
2552 val = I915_READ(EDP_PSR_CTL);
2553 enabled = val & EDP_PSR_ENABLE;
2554 }
2555 seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
2556 enableddisabled(enabled), val);
2557 psr_source_status(dev_priv, m);
2558 seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
2559 psr->busy_frontbuffer_bits);
Rodrigo Vivia6cbdb82014-11-14 08:52:40 -08002560
Rodrigo Vivi05eec3c2015-11-23 14:16:40 -08002561 /*
Rodrigo Vivi05eec3c2015-11-23 14:16:40 -08002562 * SKL+ Perf counter is reset to 0 everytime DC state is entered
2563 */
David Weinehall36cdd012016-08-22 13:59:31 +03002564 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002565 val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
2566 seq_printf(m, "Performance counter: %u\n", val);
Rodrigo Vivia6cbdb82014-11-14 08:52:40 -08002567 }
Nagaraju, Vathsala6ba1f9e2017-01-06 22:02:32 +05302568
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002569 if (psr->debug & I915_PSR_DEBUG_IRQ) {
Dhinakaran Pandiyan3f983e542018-04-03 14:24:20 -07002570 seq_printf(m, "Last attempted entry at: %lld\n",
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002571 psr->last_entry_attempt);
2572 seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
Dhinakaran Pandiyan3f983e542018-04-03 14:24:20 -07002573 }
2574
José Roberto de Souzaa81f7812019-01-17 12:55:48 -08002575 if (psr->psr2_enabled) {
2576 u32 su_frames_val[3];
2577 int frame;
2578
2579 /*
2580 * Reading all 3 registers before hand to minimize crossing a
2581 * frame boundary between register reads
2582 */
2583 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
2584 su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));
2585
2586 seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
2587
2588 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
2589 u32 su_blocks;
2590
2591 su_blocks = su_frames_val[frame / 3] &
2592 PSR2_SU_STATUS_MASK(frame);
2593 su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
2594 seq_printf(m, "%d\t%d\n", frame, su_blocks);
2595 }
2596 }
2597
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002598unlock:
2599 mutex_unlock(&psr->lock);
Chris Wilsona0371212019-01-14 14:21:14 +00002600 intel_runtime_pm_put(dev_priv, wakeref);
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002601
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03002602 return 0;
2603}
2604
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002605static int
2606i915_edp_psr_debug_set(void *data, u64 val)
2607{
2608 struct drm_i915_private *dev_priv = data;
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002609 struct drm_modeset_acquire_ctx ctx;
Chris Wilsona0371212019-01-14 14:21:14 +00002610 intel_wakeref_t wakeref;
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002611 int ret;
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002612
2613 if (!CAN_PSR(dev_priv))
2614 return -ENODEV;
2615
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002616 DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002617
Chris Wilsona0371212019-01-14 14:21:14 +00002618 wakeref = intel_runtime_pm_get(dev_priv);
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002619
2620 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2621
2622retry:
2623 ret = intel_psr_set_debugfs_mode(dev_priv, &ctx, val);
2624 if (ret == -EDEADLK) {
2625 ret = drm_modeset_backoff(&ctx);
2626 if (!ret)
2627 goto retry;
2628 }
2629
2630 drm_modeset_drop_locks(&ctx);
2631 drm_modeset_acquire_fini(&ctx);
2632
Chris Wilsona0371212019-01-14 14:21:14 +00002633 intel_runtime_pm_put(dev_priv, wakeref);
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002634
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002635 return ret;
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002636}
2637
2638static int
2639i915_edp_psr_debug_get(void *data, u64 *val)
2640{
2641 struct drm_i915_private *dev_priv = data;
2642
2643 if (!CAN_PSR(dev_priv))
2644 return -ENODEV;
2645
2646 *val = READ_ONCE(dev_priv->psr.debug);
2647 return 0;
2648}
2649
2650DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
2651 i915_edp_psr_debug_get, i915_edp_psr_debug_set,
2652 "%llu\n");
2653
Jesse Barnesec013e72013-08-20 10:29:23 +01002654static int i915_energy_uJ(struct seq_file *m, void *data)
2655{
David Weinehall36cdd012016-08-22 13:59:31 +03002656 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Gabriel Krisman Bertazid38014e2017-07-26 02:30:16 -03002657 unsigned long long power;
Chris Wilsona0371212019-01-14 14:21:14 +00002658 intel_wakeref_t wakeref;
Jesse Barnesec013e72013-08-20 10:29:23 +01002659 u32 units;
2660
David Weinehall36cdd012016-08-22 13:59:31 +03002661 if (INTEL_GEN(dev_priv) < 6)
Jesse Barnesec013e72013-08-20 10:29:23 +01002662 return -ENODEV;
2663
Chris Wilsond4225a52019-01-14 14:21:23 +00002664 if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
Gabriel Krisman Bertazid38014e2017-07-26 02:30:16 -03002665 return -ENODEV;
Gabriel Krisman Bertazid38014e2017-07-26 02:30:16 -03002666
2667 units = (power & 0x1f00) >> 8;
Chris Wilsond4225a52019-01-14 14:21:23 +00002668 with_intel_runtime_pm(dev_priv, wakeref)
2669 power = I915_READ(MCH_SECP_NRG_STTS);
2670
Gabriel Krisman Bertazid38014e2017-07-26 02:30:16 -03002671 power = (1000000 * power) >> units; /* convert to uJ */
Gabriel Krisman Bertazid38014e2017-07-26 02:30:16 -03002672 seq_printf(m, "%llu", power);
Paulo Zanoni371db662013-08-19 13:18:10 -03002673
2674 return 0;
2675}
2676
Damien Lespiau6455c872015-06-04 18:23:57 +01002677static int i915_runtime_pm_status(struct seq_file *m, void *unused)
Paulo Zanoni371db662013-08-19 13:18:10 -03002678{
David Weinehall36cdd012016-08-22 13:59:31 +03002679 struct drm_i915_private *dev_priv = node_to_i915(m->private);
David Weinehall52a05c32016-08-22 13:32:44 +03002680 struct pci_dev *pdev = dev_priv->drm.pdev;
Paulo Zanoni371db662013-08-19 13:18:10 -03002681
Chris Wilsona156e642016-04-03 14:14:21 +01002682 if (!HAS_RUNTIME_PM(dev_priv))
2683 seq_puts(m, "Runtime power management not supported\n");
Paulo Zanoni371db662013-08-19 13:18:10 -03002684
Chris Wilson25c896bd2019-01-14 14:21:25 +00002685 seq_printf(m, "Runtime power status: %s\n",
2686 enableddisabled(!dev_priv->power_domains.wakeref));
2687
Chris Wilson6f561032018-01-24 11:36:07 +00002688 seq_printf(m, "GPU idle: %s (epoch %u)\n",
2689 yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
Paulo Zanoni371db662013-08-19 13:18:10 -03002690 seq_printf(m, "IRQs disabled: %s\n",
Jesse Barnes9df7575f2014-06-20 09:29:20 -07002691 yesno(!intel_irqs_enabled(dev_priv)));
Chris Wilson0d804182015-06-15 12:52:28 +01002692#ifdef CONFIG_PM
Damien Lespiaua6aaec82015-06-04 18:23:58 +01002693 seq_printf(m, "Usage count: %d\n",
David Weinehall36cdd012016-08-22 13:59:31 +03002694 atomic_read(&dev_priv->drm.dev->power.usage_count));
Chris Wilson0d804182015-06-15 12:52:28 +01002695#else
2696 seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2697#endif
Chris Wilsona156e642016-04-03 14:14:21 +01002698 seq_printf(m, "PCI device power state: %s [%d]\n",
David Weinehall52a05c32016-08-22 13:32:44 +03002699 pci_power_name(pdev->current_state),
2700 pdev->current_state);
Paulo Zanoni371db662013-08-19 13:18:10 -03002701
Chris Wilsonbd780f32019-01-14 14:21:09 +00002702 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
2703 struct drm_printer p = drm_seq_file_printer(m);
2704
2705 print_intel_runtime_pm_wakeref(dev_priv, &p);
2706 }
2707
Jesse Barnesec013e72013-08-20 10:29:23 +01002708 return 0;
2709}
2710
Imre Deak1da51582013-11-25 17:15:35 +02002711static int i915_power_domain_info(struct seq_file *m, void *unused)
2712{
David Weinehall36cdd012016-08-22 13:59:31 +03002713 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Imre Deak1da51582013-11-25 17:15:35 +02002714 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2715 int i;
2716
2717 mutex_lock(&power_domains->lock);
2718
2719 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2720 for (i = 0; i < power_domains->power_well_count; i++) {
2721 struct i915_power_well *power_well;
2722 enum intel_display_power_domain power_domain;
2723
2724 power_well = &power_domains->power_wells[i];
Imre Deakf28ec6f2018-08-06 12:58:37 +03002725 seq_printf(m, "%-25s %d\n", power_well->desc->name,
Imre Deak1da51582013-11-25 17:15:35 +02002726 power_well->count);
2727
Imre Deakf28ec6f2018-08-06 12:58:37 +03002728 for_each_power_domain(power_domain, power_well->desc->domains)
Imre Deak1da51582013-11-25 17:15:35 +02002729 seq_printf(m, " %-23s %d\n",
Daniel Stone9895ad02015-11-20 15:55:33 +00002730 intel_display_power_domain_str(power_domain),
Imre Deak1da51582013-11-25 17:15:35 +02002731 power_domains->domain_use_count[power_domain]);
Imre Deak1da51582013-11-25 17:15:35 +02002732 }
2733
2734 mutex_unlock(&power_domains->lock);
2735
2736 return 0;
2737}
2738
Damien Lespiaub7cec662015-10-27 14:47:01 +02002739static int i915_dmc_info(struct seq_file *m, void *unused)
2740{
David Weinehall36cdd012016-08-22 13:59:31 +03002741 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00002742 intel_wakeref_t wakeref;
Damien Lespiaub7cec662015-10-27 14:47:01 +02002743 struct intel_csr *csr;
2744
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002745 if (!HAS_CSR(dev_priv))
2746 return -ENODEV;
Damien Lespiaub7cec662015-10-27 14:47:01 +02002747
2748 csr = &dev_priv->csr;
2749
Chris Wilsona0371212019-01-14 14:21:14 +00002750 wakeref = intel_runtime_pm_get(dev_priv);
Mika Kuoppala6fb403d2015-10-30 17:54:47 +02002751
Damien Lespiaub7cec662015-10-27 14:47:01 +02002752 seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2753 seq_printf(m, "path: %s\n", csr->fw_path);
2754
2755 if (!csr->dmc_payload)
Mika Kuoppala6fb403d2015-10-30 17:54:47 +02002756 goto out;
Damien Lespiaub7cec662015-10-27 14:47:01 +02002757
2758 seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2759 CSR_VERSION_MINOR(csr->version));
2760
Imre Deak34b2f8d2018-10-31 22:02:20 +02002761 if (WARN_ON(INTEL_GEN(dev_priv) > 11))
2762 goto out;
2763
2764 seq_printf(m, "DC3 -> DC5 count: %d\n",
2765 I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
2766 SKL_CSR_DC3_DC5_COUNT));
2767 if (!IS_GEN9_LP(dev_priv))
Damien Lespiau83372062015-10-30 17:53:32 +02002768 seq_printf(m, "DC5 -> DC6 count: %d\n",
2769 I915_READ(SKL_CSR_DC5_DC6_COUNT));
Damien Lespiau83372062015-10-30 17:53:32 +02002770
Mika Kuoppala6fb403d2015-10-30 17:54:47 +02002771out:
2772 seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2773 seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2774 seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2775
Chris Wilsona0371212019-01-14 14:21:14 +00002776 intel_runtime_pm_put(dev_priv, wakeref);
Damien Lespiau83372062015-10-30 17:53:32 +02002777
Damien Lespiaub7cec662015-10-27 14:47:01 +02002778 return 0;
2779}
2780
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002781static void intel_seq_print_mode(struct seq_file *m, int tabs,
2782 struct drm_display_mode *mode)
2783{
2784 int i;
2785
2786 for (i = 0; i < tabs; i++)
2787 seq_putc(m, '\t');
2788
Shayenne Moura4fb6bb82018-12-20 10:27:57 -02002789 seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002790}
2791
2792static void intel_encoder_info(struct seq_file *m,
2793 struct intel_crtc *intel_crtc,
2794 struct intel_encoder *intel_encoder)
2795{
David Weinehall36cdd012016-08-22 13:59:31 +03002796 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2797 struct drm_device *dev = &dev_priv->drm;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002798 struct drm_crtc *crtc = &intel_crtc->base;
2799 struct intel_connector *intel_connector;
2800 struct drm_encoder *encoder;
2801
2802 encoder = &intel_encoder->base;
2803 seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
Jani Nikula8e329a032014-06-03 14:56:21 +03002804 encoder->base.id, encoder->name);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002805 for_each_connector_on_encoder(dev, encoder, intel_connector) {
2806 struct drm_connector *connector = &intel_connector->base;
2807 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2808 connector->base.id,
Jani Nikulac23cc412014-06-03 14:56:17 +03002809 connector->name,
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002810 drm_get_connector_status_name(connector->status));
2811 if (connector->status == connector_status_connected) {
2812 struct drm_display_mode *mode = &crtc->mode;
2813 seq_printf(m, ", mode:\n");
2814 intel_seq_print_mode(m, 2, mode);
2815 } else {
2816 seq_putc(m, '\n');
2817 }
2818 }
2819}
2820
2821static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2822{
David Weinehall36cdd012016-08-22 13:59:31 +03002823 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2824 struct drm_device *dev = &dev_priv->drm;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002825 struct drm_crtc *crtc = &intel_crtc->base;
2826 struct intel_encoder *intel_encoder;
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02002827 struct drm_plane_state *plane_state = crtc->primary->state;
2828 struct drm_framebuffer *fb = plane_state->fb;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002829
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02002830 if (fb)
Matt Roper5aa8a932014-06-16 10:12:55 -07002831 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02002832 fb->base.id, plane_state->src_x >> 16,
2833 plane_state->src_y >> 16, fb->width, fb->height);
Matt Roper5aa8a932014-06-16 10:12:55 -07002834 else
2835 seq_puts(m, "\tprimary plane disabled\n");
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002836 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2837 intel_encoder_info(m, intel_crtc, intel_encoder);
2838}
2839
2840static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2841{
2842 struct drm_display_mode *mode = panel->fixed_mode;
2843
2844 seq_printf(m, "\tfixed mode:\n");
2845 intel_seq_print_mode(m, 2, mode);
2846}
2847
2848static void intel_dp_info(struct seq_file *m,
2849 struct intel_connector *intel_connector)
2850{
2851 struct intel_encoder *intel_encoder = intel_connector->encoder;
2852 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2853
2854 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
Jani Nikula742f4912015-09-03 11:16:09 +03002855 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02002856 if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002857 intel_panel_info(m, &intel_connector->panel);
Mika Kahola80209e52016-09-09 14:10:57 +03002858
2859 drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2860 &intel_dp->aux);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002861}
2862
Libin Yang9a148a92016-11-28 20:07:05 +08002863static void intel_dp_mst_info(struct seq_file *m,
2864 struct intel_connector *intel_connector)
2865{
2866 struct intel_encoder *intel_encoder = intel_connector->encoder;
2867 struct intel_dp_mst_encoder *intel_mst =
2868 enc_to_mst(&intel_encoder->base);
2869 struct intel_digital_port *intel_dig_port = intel_mst->primary;
2870 struct intel_dp *intel_dp = &intel_dig_port->dp;
2871 bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2872 intel_connector->port);
2873
2874 seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2875}
2876
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002877static void intel_hdmi_info(struct seq_file *m,
2878 struct intel_connector *intel_connector)
2879{
2880 struct intel_encoder *intel_encoder = intel_connector->encoder;
2881 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2882
Jani Nikula742f4912015-09-03 11:16:09 +03002883 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002884}
2885
2886static void intel_lvds_info(struct seq_file *m,
2887 struct intel_connector *intel_connector)
2888{
2889 intel_panel_info(m, &intel_connector->panel);
2890}
2891
2892static void intel_connector_info(struct seq_file *m,
2893 struct drm_connector *connector)
2894{
2895 struct intel_connector *intel_connector = to_intel_connector(connector);
2896 struct intel_encoder *intel_encoder = intel_connector->encoder;
Jesse Barnesf103fc72014-02-20 12:39:57 -08002897 struct drm_display_mode *mode;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002898
2899 seq_printf(m, "connector %d: type %s, status: %s\n",
Jani Nikulac23cc412014-06-03 14:56:17 +03002900 connector->base.id, connector->name,
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002901 drm_get_connector_status_name(connector->status));
José Roberto de Souza3e037f92018-10-30 14:57:46 -07002902
2903 if (connector->status == connector_status_disconnected)
2904 return;
2905
2906 seq_printf(m, "\tname: %s\n", connector->display_info.name);
2907 seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2908 connector->display_info.width_mm,
2909 connector->display_info.height_mm);
2910 seq_printf(m, "\tsubpixel order: %s\n",
2911 drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2912 seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
Maarten Lankhorstee648a72016-06-21 12:00:38 +02002913
Maarten Lankhorst77d1f612017-06-26 10:33:49 +02002914 if (!intel_encoder)
Maarten Lankhorstee648a72016-06-21 12:00:38 +02002915 return;
2916
2917 switch (connector->connector_type) {
2918 case DRM_MODE_CONNECTOR_DisplayPort:
2919 case DRM_MODE_CONNECTOR_eDP:
Libin Yang9a148a92016-11-28 20:07:05 +08002920 if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
2921 intel_dp_mst_info(m, intel_connector);
2922 else
2923 intel_dp_info(m, intel_connector);
Maarten Lankhorstee648a72016-06-21 12:00:38 +02002924 break;
2925 case DRM_MODE_CONNECTOR_LVDS:
2926 if (intel_encoder->type == INTEL_OUTPUT_LVDS)
Dave Airlie36cd7442014-05-02 13:44:18 +10002927 intel_lvds_info(m, intel_connector);
Maarten Lankhorstee648a72016-06-21 12:00:38 +02002928 break;
2929 case DRM_MODE_CONNECTOR_HDMIA:
2930 if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
Ville Syrjälä7e732ca2017-10-27 22:31:24 +03002931 intel_encoder->type == INTEL_OUTPUT_DDI)
Maarten Lankhorstee648a72016-06-21 12:00:38 +02002932 intel_hdmi_info(m, intel_connector);
2933 break;
2934 default:
2935 break;
Dave Airlie36cd7442014-05-02 13:44:18 +10002936 }
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002937
Jesse Barnesf103fc72014-02-20 12:39:57 -08002938 seq_printf(m, "\tmodes:\n");
2939 list_for_each_entry(mode, &connector->modes, head)
2940 intel_seq_print_mode(m, 2, mode);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002941}
2942
Robert Fekete3abc4e02015-10-27 16:58:32 +01002943static const char *plane_type(enum drm_plane_type type)
2944{
2945 switch (type) {
2946 case DRM_PLANE_TYPE_OVERLAY:
2947 return "OVL";
2948 case DRM_PLANE_TYPE_PRIMARY:
2949 return "PRI";
2950 case DRM_PLANE_TYPE_CURSOR:
2951 return "CUR";
2952 /*
2953 * Deliberately omitting default: to generate compiler warnings
2954 * when a new drm_plane_type gets added.
2955 */
2956 }
2957
2958 return "unknown";
2959}
2960
Jani Nikula5852a152019-01-07 16:51:49 +02002961static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
Robert Fekete3abc4e02015-10-27 16:58:32 +01002962{
Robert Fekete3abc4e02015-10-27 16:58:32 +01002963 /*
Robert Fossc2c446a2017-05-19 16:50:17 -04002964 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
Robert Fekete3abc4e02015-10-27 16:58:32 +01002965 * will print them all to visualize if the values are misused
2966 */
Jani Nikula5852a152019-01-07 16:51:49 +02002967 snprintf(buf, bufsize,
Robert Fekete3abc4e02015-10-27 16:58:32 +01002968 "%s%s%s%s%s%s(0x%08x)",
Robert Fossc2c446a2017-05-19 16:50:17 -04002969 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
2970 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
2971 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
2972 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
2973 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
2974 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
Robert Fekete3abc4e02015-10-27 16:58:32 +01002975 rotation);
Robert Fekete3abc4e02015-10-27 16:58:32 +01002976}
2977
2978static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2979{
David Weinehall36cdd012016-08-22 13:59:31 +03002980 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2981 struct drm_device *dev = &dev_priv->drm;
Robert Fekete3abc4e02015-10-27 16:58:32 +01002982 struct intel_plane *intel_plane;
2983
2984 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2985 struct drm_plane_state *state;
2986 struct drm_plane *plane = &intel_plane->base;
Eric Engestromb3c11ac2016-11-12 01:12:56 +00002987 struct drm_format_name_buf format_name;
Jani Nikula5852a152019-01-07 16:51:49 +02002988 char rot_str[48];
Robert Fekete3abc4e02015-10-27 16:58:32 +01002989
2990 if (!plane->state) {
2991 seq_puts(m, "plane->state is NULL!\n");
2992 continue;
2993 }
2994
2995 state = plane->state;
2996
Eric Engestrom90844f02016-08-15 01:02:38 +01002997 if (state->fb) {
Ville Syrjälä438b74a2016-12-14 23:32:55 +02002998 drm_get_format_name(state->fb->format->format,
2999 &format_name);
Eric Engestrom90844f02016-08-15 01:02:38 +01003000 } else {
Eric Engestromb3c11ac2016-11-12 01:12:56 +00003001 sprintf(format_name.str, "N/A");
Eric Engestrom90844f02016-08-15 01:02:38 +01003002 }
3003
Jani Nikula5852a152019-01-07 16:51:49 +02003004 plane_rotation(rot_str, sizeof(rot_str), state->rotation);
3005
Robert Fekete3abc4e02015-10-27 16:58:32 +01003006 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
3007 plane->base.id,
3008 plane_type(intel_plane->base.type),
3009 state->crtc_x, state->crtc_y,
3010 state->crtc_w, state->crtc_h,
3011 (state->src_x >> 16),
3012 ((state->src_x & 0xffff) * 15625) >> 10,
3013 (state->src_y >> 16),
3014 ((state->src_y & 0xffff) * 15625) >> 10,
3015 (state->src_w >> 16),
3016 ((state->src_w & 0xffff) * 15625) >> 10,
3017 (state->src_h >> 16),
3018 ((state->src_h & 0xffff) * 15625) >> 10,
Eric Engestromb3c11ac2016-11-12 01:12:56 +00003019 format_name.str,
Jani Nikula5852a152019-01-07 16:51:49 +02003020 rot_str);
Robert Fekete3abc4e02015-10-27 16:58:32 +01003021 }
3022}
3023
3024static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3025{
3026 struct intel_crtc_state *pipe_config;
3027 int num_scalers = intel_crtc->num_scalers;
3028 int i;
3029
3030 pipe_config = to_intel_crtc_state(intel_crtc->base.state);
3031
3032 /* Not all platformas have a scaler */
3033 if (num_scalers) {
3034 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
3035 num_scalers,
3036 pipe_config->scaler_state.scaler_users,
3037 pipe_config->scaler_state.scaler_id);
3038
A.Sunil Kamath58415912016-11-20 23:20:26 +05303039 for (i = 0; i < num_scalers; i++) {
Robert Fekete3abc4e02015-10-27 16:58:32 +01003040 struct intel_scaler *sc =
3041 &pipe_config->scaler_state.scalers[i];
3042
3043 seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
3044 i, yesno(sc->in_use), sc->mode);
3045 }
3046 seq_puts(m, "\n");
3047 } else {
3048 seq_puts(m, "\tNo scalers available on this platform\n");
3049 }
3050}
3051
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003052static int i915_display_info(struct seq_file *m, void *unused)
3053{
David Weinehall36cdd012016-08-22 13:59:31 +03003054 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3055 struct drm_device *dev = &dev_priv->drm;
Chris Wilson065f2ec22014-03-12 09:13:13 +00003056 struct intel_crtc *crtc;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003057 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003058 struct drm_connector_list_iter conn_iter;
Chris Wilsona0371212019-01-14 14:21:14 +00003059 intel_wakeref_t wakeref;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003060
Chris Wilsona0371212019-01-14 14:21:14 +00003061 wakeref = intel_runtime_pm_get(dev_priv);
3062
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003063 seq_printf(m, "CRTC info\n");
3064 seq_printf(m, "---------\n");
Damien Lespiaud3fcc802014-05-13 23:32:22 +01003065 for_each_intel_crtc(dev, crtc) {
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003066 struct intel_crtc_state *pipe_config;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003067
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003068 drm_modeset_lock(&crtc->base.mutex, NULL);
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003069 pipe_config = to_intel_crtc_state(crtc->base.state);
3070
Robert Fekete3abc4e02015-10-27 16:58:32 +01003071 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
Chris Wilson065f2ec22014-03-12 09:13:13 +00003072 crtc->base.base.id, pipe_name(crtc->pipe),
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003073 yesno(pipe_config->base.active),
Robert Fekete3abc4e02015-10-27 16:58:32 +01003074 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3075 yesno(pipe_config->dither), pipe_config->pipe_bpp);
3076
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003077 if (pipe_config->base.active) {
Ville Syrjäläcd5dcbf2017-03-27 21:55:35 +03003078 struct intel_plane *cursor =
3079 to_intel_plane(crtc->base.cursor);
3080
Chris Wilson065f2ec22014-03-12 09:13:13 +00003081 intel_crtc_info(m, crtc);
3082
Ville Syrjäläcd5dcbf2017-03-27 21:55:35 +03003083 seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
3084 yesno(cursor->base.state->visible),
3085 cursor->base.state->crtc_x,
3086 cursor->base.state->crtc_y,
3087 cursor->base.state->crtc_w,
3088 cursor->base.state->crtc_h,
3089 cursor->cursor.base);
Robert Fekete3abc4e02015-10-27 16:58:32 +01003090 intel_scaler_info(m, crtc);
3091 intel_plane_info(m, crtc);
Paulo Zanonia23dc652014-04-01 14:55:11 -03003092 }
Daniel Vettercace8412014-05-22 17:56:31 +02003093
3094 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
3095 yesno(!crtc->cpu_fifo_underrun_disabled),
3096 yesno(!crtc->pch_fifo_underrun_disabled));
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003097 drm_modeset_unlock(&crtc->base.mutex);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003098 }
3099
3100 seq_printf(m, "\n");
3101 seq_printf(m, "Connector info\n");
3102 seq_printf(m, "--------------\n");
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003103 mutex_lock(&dev->mode_config.mutex);
3104 drm_connector_list_iter_begin(dev, &conn_iter);
3105 drm_for_each_connector_iter(connector, &conn_iter)
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003106 intel_connector_info(m, connector);
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003107 drm_connector_list_iter_end(&conn_iter);
3108 mutex_unlock(&dev->mode_config.mutex);
3109
Chris Wilsona0371212019-01-14 14:21:14 +00003110 intel_runtime_pm_put(dev_priv, wakeref);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003111
3112 return 0;
3113}
3114
Chris Wilson1b365952016-10-04 21:11:31 +01003115static int i915_engine_info(struct seq_file *m, void *unused)
3116{
3117 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3118 struct intel_engine_cs *engine;
Chris Wilsona0371212019-01-14 14:21:14 +00003119 intel_wakeref_t wakeref;
Akash Goel3b3f1652016-10-13 22:44:48 +05303120 enum intel_engine_id id;
Chris Wilsonf636edb2017-10-09 12:02:57 +01003121 struct drm_printer p;
Chris Wilson1b365952016-10-04 21:11:31 +01003122
Chris Wilsona0371212019-01-14 14:21:14 +00003123 wakeref = intel_runtime_pm_get(dev_priv);
Chris Wilson9c870d02016-10-24 13:42:15 +01003124
Chris Wilson6f561032018-01-24 11:36:07 +00003125 seq_printf(m, "GT awake? %s (epoch %u)\n",
3126 yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
Chris Wilsonf73b5672017-03-02 15:03:56 +00003127 seq_printf(m, "Global active requests: %d\n",
3128 dev_priv->gt.active_requests);
Lionel Landwerlinf577a032017-11-13 23:34:53 +00003129 seq_printf(m, "CS timestamp frequency: %u kHz\n",
Jani Nikula02584042018-12-31 16:56:41 +02003130 RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
Chris Wilsonf73b5672017-03-02 15:03:56 +00003131
Chris Wilsonf636edb2017-10-09 12:02:57 +01003132 p = drm_seq_file_printer(m);
3133 for_each_engine(engine, dev_priv, id)
Chris Wilson0db18b12017-12-08 01:23:00 +00003134 intel_engine_dump(engine, &p, "%s\n", engine->name);
Chris Wilson1b365952016-10-04 21:11:31 +01003135
Chris Wilsona0371212019-01-14 14:21:14 +00003136 intel_runtime_pm_put(dev_priv, wakeref);
Chris Wilson9c870d02016-10-24 13:42:15 +01003137
Chris Wilson1b365952016-10-04 21:11:31 +01003138 return 0;
3139}
3140
Lionel Landwerlin79e9cd52018-03-06 12:28:54 +00003141static int i915_rcs_topology(struct seq_file *m, void *unused)
3142{
3143 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3144 struct drm_printer p = drm_seq_file_printer(m);
3145
Jani Nikula02584042018-12-31 16:56:41 +02003146 intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
Lionel Landwerlin79e9cd52018-03-06 12:28:54 +00003147
3148 return 0;
3149}
3150
Chris Wilsonc5418a82017-10-13 21:26:19 +01003151static int i915_shrinker_info(struct seq_file *m, void *unused)
3152{
3153 struct drm_i915_private *i915 = node_to_i915(m->private);
3154
3155 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3156 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3157
3158 return 0;
3159}
3160
Daniel Vetter728e29d2014-06-25 22:01:53 +03003161static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3162{
David Weinehall36cdd012016-08-22 13:59:31 +03003163 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3164 struct drm_device *dev = &dev_priv->drm;
Daniel Vetter728e29d2014-06-25 22:01:53 +03003165 int i;
3166
3167 drm_modeset_lock_all(dev);
3168 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3169 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
3170
Lucas De Marchi72f775f2018-03-20 15:06:34 -07003171 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
Lucas De Marchi0823eb92018-03-20 15:06:35 -07003172 pll->info->id);
Maarten Lankhorst2dd66ebd2016-03-14 09:27:52 +01003173 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
Ander Conselvan de Oliveira2c42e532016-12-29 17:22:09 +02003174 pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
Daniel Vetter728e29d2014-06-25 22:01:53 +03003175 seq_printf(m, " tracked hardware state:\n");
Ander Conselvan de Oliveira2c42e532016-12-29 17:22:09 +02003176 seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
Ander Conselvan de Oliveira3e369b72014-10-29 11:32:32 +02003177 seq_printf(m, " dpll_md: 0x%08x\n",
Ander Conselvan de Oliveira2c42e532016-12-29 17:22:09 +02003178 pll->state.hw_state.dpll_md);
3179 seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
3180 seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
3181 seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
Paulo Zanonic27e9172018-04-27 16:14:36 -07003182 seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
3183 seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
3184 seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
3185 pll->state.hw_state.mg_refclkin_ctl);
3186 seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
3187 pll->state.hw_state.mg_clktop2_coreclkctl1);
3188 seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
3189 pll->state.hw_state.mg_clktop2_hsclkctl);
3190 seq_printf(m, " mg_pll_div0: 0x%08x\n",
3191 pll->state.hw_state.mg_pll_div0);
3192 seq_printf(m, " mg_pll_div1: 0x%08x\n",
3193 pll->state.hw_state.mg_pll_div1);
3194 seq_printf(m, " mg_pll_lf: 0x%08x\n",
3195 pll->state.hw_state.mg_pll_lf);
3196 seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
3197 pll->state.hw_state.mg_pll_frac_lock);
3198 seq_printf(m, " mg_pll_ssc: 0x%08x\n",
3199 pll->state.hw_state.mg_pll_ssc);
3200 seq_printf(m, " mg_pll_bias: 0x%08x\n",
3201 pll->state.hw_state.mg_pll_bias);
3202 seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
3203 pll->state.hw_state.mg_pll_tdc_coldst_bias);
Daniel Vetter728e29d2014-06-25 22:01:53 +03003204 }
3205 drm_modeset_unlock_all(dev);
3206
3207 return 0;
3208}
3209
Damien Lespiau1ed1ef92014-08-30 16:50:59 +01003210static int i915_wa_registers(struct seq_file *m, void *unused)
Arun Siluvery888b5992014-08-26 14:44:51 +01003211{
Tvrtko Ursulin452420d2018-12-03 13:33:57 +00003212 struct drm_i915_private *i915 = node_to_i915(m->private);
3213 const struct i915_wa_list *wal = &i915->engine[RCS]->ctx_wa_list;
3214 struct i915_wa *wa;
3215 unsigned int i;
Arun Siluvery888b5992014-08-26 14:44:51 +01003216
Tvrtko Ursulin452420d2018-12-03 13:33:57 +00003217 seq_printf(m, "Workarounds applied: %u\n", wal->count);
3218 for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
Chris Wilson548764b2018-06-15 13:02:07 +01003219 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
Tvrtko Ursulin452420d2018-12-03 13:33:57 +00003220 i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);
Arun Siluvery888b5992014-08-26 14:44:51 +01003221
3222 return 0;
3223}
3224
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05303225static int i915_ipc_status_show(struct seq_file *m, void *data)
3226{
3227 struct drm_i915_private *dev_priv = m->private;
3228
3229 seq_printf(m, "Isochronous Priority Control: %s\n",
3230 yesno(dev_priv->ipc_enabled));
3231 return 0;
3232}
3233
3234static int i915_ipc_status_open(struct inode *inode, struct file *file)
3235{
3236 struct drm_i915_private *dev_priv = inode->i_private;
3237
3238 if (!HAS_IPC(dev_priv))
3239 return -ENODEV;
3240
3241 return single_open(file, i915_ipc_status_show, dev_priv);
3242}
3243
3244static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
3245 size_t len, loff_t *offp)
3246{
3247 struct seq_file *m = file->private_data;
3248 struct drm_i915_private *dev_priv = m->private;
Chris Wilsona0371212019-01-14 14:21:14 +00003249 intel_wakeref_t wakeref;
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05303250 bool enable;
Chris Wilsond4225a52019-01-14 14:21:23 +00003251 int ret;
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05303252
3253 ret = kstrtobool_from_user(ubuf, len, &enable);
3254 if (ret < 0)
3255 return ret;
3256
Chris Wilsond4225a52019-01-14 14:21:23 +00003257 with_intel_runtime_pm(dev_priv, wakeref) {
3258 if (!dev_priv->ipc_enabled && enable)
3259 DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
3260 dev_priv->wm.distrust_bios_wm = true;
3261 dev_priv->ipc_enabled = enable;
3262 intel_enable_ipc(dev_priv);
3263 }
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05303264
3265 return len;
3266}
3267
3268static const struct file_operations i915_ipc_status_fops = {
3269 .owner = THIS_MODULE,
3270 .open = i915_ipc_status_open,
3271 .read = seq_read,
3272 .llseek = seq_lseek,
3273 .release = single_release,
3274 .write = i915_ipc_status_write
3275};
3276
Damien Lespiauc5511e42014-11-04 17:06:51 +00003277static int i915_ddb_info(struct seq_file *m, void *unused)
3278{
David Weinehall36cdd012016-08-22 13:59:31 +03003279 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3280 struct drm_device *dev = &dev_priv->drm;
Damien Lespiauc5511e42014-11-04 17:06:51 +00003281 struct skl_ddb_entry *entry;
Ville Syrjäläff43bc32018-11-27 18:59:00 +02003282 struct intel_crtc *crtc;
Damien Lespiauc5511e42014-11-04 17:06:51 +00003283
David Weinehall36cdd012016-08-22 13:59:31 +03003284 if (INTEL_GEN(dev_priv) < 9)
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00003285 return -ENODEV;
Damien Lespiau2fcffe12014-12-03 17:33:24 +00003286
Damien Lespiauc5511e42014-11-04 17:06:51 +00003287 drm_modeset_lock_all(dev);
3288
Damien Lespiauc5511e42014-11-04 17:06:51 +00003289 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3290
Ville Syrjäläff43bc32018-11-27 18:59:00 +02003291 for_each_intel_crtc(&dev_priv->drm, crtc) {
3292 struct intel_crtc_state *crtc_state =
3293 to_intel_crtc_state(crtc->base.state);
3294 enum pipe pipe = crtc->pipe;
3295 enum plane_id plane_id;
3296
Damien Lespiauc5511e42014-11-04 17:06:51 +00003297 seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3298
Ville Syrjäläff43bc32018-11-27 18:59:00 +02003299 for_each_plane_id_on_crtc(crtc, plane_id) {
3300 entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
3301 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
Damien Lespiauc5511e42014-11-04 17:06:51 +00003302 entry->start, entry->end,
3303 skl_ddb_entry_size(entry));
3304 }
3305
Ville Syrjäläff43bc32018-11-27 18:59:00 +02003306 entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
Damien Lespiauc5511e42014-11-04 17:06:51 +00003307 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
3308 entry->end, skl_ddb_entry_size(entry));
3309 }
3310
3311 drm_modeset_unlock_all(dev);
3312
3313 return 0;
3314}
3315
Vandana Kannana54746e2015-03-03 20:53:10 +05303316static void drrs_status_per_crtc(struct seq_file *m,
David Weinehall36cdd012016-08-22 13:59:31 +03003317 struct drm_device *dev,
3318 struct intel_crtc *intel_crtc)
Vandana Kannana54746e2015-03-03 20:53:10 +05303319{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003320 struct drm_i915_private *dev_priv = to_i915(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303321 struct i915_drrs *drrs = &dev_priv->drrs;
3322 int vrefresh = 0;
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003323 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003324 struct drm_connector_list_iter conn_iter;
Vandana Kannana54746e2015-03-03 20:53:10 +05303325
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003326 drm_connector_list_iter_begin(dev, &conn_iter);
3327 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003328 if (connector->state->crtc != &intel_crtc->base)
3329 continue;
3330
3331 seq_printf(m, "%s:\n", connector->name);
Vandana Kannana54746e2015-03-03 20:53:10 +05303332 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003333 drm_connector_list_iter_end(&conn_iter);
Vandana Kannana54746e2015-03-03 20:53:10 +05303334
3335 if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
3336 seq_puts(m, "\tVBT: DRRS_type: Static");
3337 else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
3338 seq_puts(m, "\tVBT: DRRS_type: Seamless");
3339 else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
3340 seq_puts(m, "\tVBT: DRRS_type: None");
3341 else
3342 seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
3343
3344 seq_puts(m, "\n\n");
3345
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003346 if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
Vandana Kannana54746e2015-03-03 20:53:10 +05303347 struct intel_panel *panel;
3348
3349 mutex_lock(&drrs->mutex);
3350 /* DRRS Supported */
3351 seq_puts(m, "\tDRRS Supported: Yes\n");
3352
3353 /* disable_drrs() will make drrs->dp NULL */
3354 if (!drrs->dp) {
C, Ramalingamce6e2132017-11-20 09:53:47 +05303355 seq_puts(m, "Idleness DRRS: Disabled\n");
3356 if (dev_priv->psr.enabled)
3357 seq_puts(m,
3358 "\tAs PSR is enabled, DRRS is not enabled\n");
Vandana Kannana54746e2015-03-03 20:53:10 +05303359 mutex_unlock(&drrs->mutex);
3360 return;
3361 }
3362
3363 panel = &drrs->dp->attached_connector->panel;
3364 seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
3365 drrs->busy_frontbuffer_bits);
3366
3367 seq_puts(m, "\n\t\t");
3368 if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
3369 seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
3370 vrefresh = panel->fixed_mode->vrefresh;
3371 } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
3372 seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
3373 vrefresh = panel->downclock_mode->vrefresh;
3374 } else {
3375 seq_printf(m, "DRRS_State: Unknown(%d)\n",
3376 drrs->refresh_rate_type);
3377 mutex_unlock(&drrs->mutex);
3378 return;
3379 }
3380 seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3381
3382 seq_puts(m, "\n\t\t");
3383 mutex_unlock(&drrs->mutex);
3384 } else {
3385 /* DRRS not supported. Print the VBT parameter*/
3386 seq_puts(m, "\tDRRS Supported : No");
3387 }
3388 seq_puts(m, "\n");
3389}
3390
3391static int i915_drrs_status(struct seq_file *m, void *unused)
3392{
David Weinehall36cdd012016-08-22 13:59:31 +03003393 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3394 struct drm_device *dev = &dev_priv->drm;
Vandana Kannana54746e2015-03-03 20:53:10 +05303395 struct intel_crtc *intel_crtc;
3396 int active_crtc_cnt = 0;
3397
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003398 drm_modeset_lock_all(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303399 for_each_intel_crtc(dev, intel_crtc) {
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003400 if (intel_crtc->base.state->active) {
Vandana Kannana54746e2015-03-03 20:53:10 +05303401 active_crtc_cnt++;
3402 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
3403
3404 drrs_status_per_crtc(m, dev, intel_crtc);
3405 }
Vandana Kannana54746e2015-03-03 20:53:10 +05303406 }
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003407 drm_modeset_unlock_all(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303408
3409 if (!active_crtc_cnt)
3410 seq_puts(m, "No active crtc found\n");
3411
3412 return 0;
3413}
3414
Dave Airlie11bed952014-05-12 15:22:27 +10003415static int i915_dp_mst_info(struct seq_file *m, void *unused)
3416{
David Weinehall36cdd012016-08-22 13:59:31 +03003417 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3418 struct drm_device *dev = &dev_priv->drm;
Dave Airlie11bed952014-05-12 15:22:27 +10003419 struct intel_encoder *intel_encoder;
3420 struct intel_digital_port *intel_dig_port;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003421 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003422 struct drm_connector_list_iter conn_iter;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003423
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003424 drm_connector_list_iter_begin(dev, &conn_iter);
3425 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003426 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
Dave Airlie11bed952014-05-12 15:22:27 +10003427 continue;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003428
3429 intel_encoder = intel_attached_encoder(connector);
3430 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3431 continue;
3432
3433 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
Dave Airlie11bed952014-05-12 15:22:27 +10003434 if (!intel_dig_port->dp.can_mst)
3435 continue;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003436
Jim Bride40ae80c2016-04-14 10:18:37 -07003437 seq_printf(m, "MST Source Port %c\n",
Ville Syrjälä8f4f2792017-11-09 17:24:34 +02003438 port_name(intel_dig_port->base.port));
Dave Airlie11bed952014-05-12 15:22:27 +10003439 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3440 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003441 drm_connector_list_iter_end(&conn_iter);
3442
Dave Airlie11bed952014-05-12 15:22:27 +10003443 return 0;
3444}
3445
Todd Previteeb3394fa2015-04-18 00:04:19 -07003446static ssize_t i915_displayport_test_active_write(struct file *file,
David Weinehall36cdd012016-08-22 13:59:31 +03003447 const char __user *ubuf,
3448 size_t len, loff_t *offp)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003449{
3450 char *input_buffer;
3451 int status = 0;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003452 struct drm_device *dev;
3453 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003454 struct drm_connector_list_iter conn_iter;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003455 struct intel_dp *intel_dp;
3456 int val = 0;
3457
Sudip Mukherjee9aaffa32015-07-21 17:36:45 +05303458 dev = ((struct seq_file *)file->private_data)->private;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003459
Todd Previteeb3394fa2015-04-18 00:04:19 -07003460 if (len == 0)
3461 return 0;
3462
Geliang Tang261aeba2017-05-06 23:40:17 +08003463 input_buffer = memdup_user_nul(ubuf, len);
3464 if (IS_ERR(input_buffer))
3465 return PTR_ERR(input_buffer);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003466
Todd Previteeb3394fa2015-04-18 00:04:19 -07003467 DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
3468
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003469 drm_connector_list_iter_begin(dev, &conn_iter);
3470 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003471 struct intel_encoder *encoder;
3472
Todd Previteeb3394fa2015-04-18 00:04:19 -07003473 if (connector->connector_type !=
3474 DRM_MODE_CONNECTOR_DisplayPort)
3475 continue;
3476
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003477 encoder = to_intel_encoder(connector->encoder);
3478 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3479 continue;
3480
3481 if (encoder && connector->status == connector_status_connected) {
3482 intel_dp = enc_to_intel_dp(&encoder->base);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003483 status = kstrtoint(input_buffer, 10, &val);
3484 if (status < 0)
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003485 break;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003486 DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3487 /* To prevent erroneous activation of the compliance
3488 * testing code, only accept an actual value of 1 here
3489 */
3490 if (val == 1)
Manasi Navarec1617ab2016-12-09 16:22:50 -08003491 intel_dp->compliance.test_active = 1;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003492 else
Manasi Navarec1617ab2016-12-09 16:22:50 -08003493 intel_dp->compliance.test_active = 0;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003494 }
3495 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003496 drm_connector_list_iter_end(&conn_iter);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003497 kfree(input_buffer);
3498 if (status < 0)
3499 return status;
3500
3501 *offp += len;
3502 return len;
3503}
3504
3505static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3506{
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003507 struct drm_i915_private *dev_priv = m->private;
3508 struct drm_device *dev = &dev_priv->drm;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003509 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003510 struct drm_connector_list_iter conn_iter;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003511 struct intel_dp *intel_dp;
3512
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003513 drm_connector_list_iter_begin(dev, &conn_iter);
3514 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003515 struct intel_encoder *encoder;
3516
Todd Previteeb3394fa2015-04-18 00:04:19 -07003517 if (connector->connector_type !=
3518 DRM_MODE_CONNECTOR_DisplayPort)
3519 continue;
3520
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003521 encoder = to_intel_encoder(connector->encoder);
3522 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3523 continue;
3524
3525 if (encoder && connector->status == connector_status_connected) {
3526 intel_dp = enc_to_intel_dp(&encoder->base);
Manasi Navarec1617ab2016-12-09 16:22:50 -08003527 if (intel_dp->compliance.test_active)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003528 seq_puts(m, "1");
3529 else
3530 seq_puts(m, "0");
3531 } else
3532 seq_puts(m, "0");
3533 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003534 drm_connector_list_iter_end(&conn_iter);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003535
3536 return 0;
3537}
3538
3539static int i915_displayport_test_active_open(struct inode *inode,
David Weinehall36cdd012016-08-22 13:59:31 +03003540 struct file *file)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003541{
David Weinehall36cdd012016-08-22 13:59:31 +03003542 return single_open(file, i915_displayport_test_active_show,
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003543 inode->i_private);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003544}
3545
3546static const struct file_operations i915_displayport_test_active_fops = {
3547 .owner = THIS_MODULE,
3548 .open = i915_displayport_test_active_open,
3549 .read = seq_read,
3550 .llseek = seq_lseek,
3551 .release = single_release,
3552 .write = i915_displayport_test_active_write
3553};
3554
3555static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3556{
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003557 struct drm_i915_private *dev_priv = m->private;
3558 struct drm_device *dev = &dev_priv->drm;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003559 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003560 struct drm_connector_list_iter conn_iter;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003561 struct intel_dp *intel_dp;
3562
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003563 drm_connector_list_iter_begin(dev, &conn_iter);
3564 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003565 struct intel_encoder *encoder;
3566
Todd Previteeb3394fa2015-04-18 00:04:19 -07003567 if (connector->connector_type !=
3568 DRM_MODE_CONNECTOR_DisplayPort)
3569 continue;
3570
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003571 encoder = to_intel_encoder(connector->encoder);
3572 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3573 continue;
3574
3575 if (encoder && connector->status == connector_status_connected) {
3576 intel_dp = enc_to_intel_dp(&encoder->base);
Manasi Navareb48a5ba2017-01-20 19:09:28 -08003577 if (intel_dp->compliance.test_type ==
3578 DP_TEST_LINK_EDID_READ)
3579 seq_printf(m, "%lx",
3580 intel_dp->compliance.test_data.edid);
Manasi Navare611032b2017-01-24 08:21:49 -08003581 else if (intel_dp->compliance.test_type ==
3582 DP_TEST_LINK_VIDEO_PATTERN) {
3583 seq_printf(m, "hdisplay: %d\n",
3584 intel_dp->compliance.test_data.hdisplay);
3585 seq_printf(m, "vdisplay: %d\n",
3586 intel_dp->compliance.test_data.vdisplay);
3587 seq_printf(m, "bpc: %u\n",
3588 intel_dp->compliance.test_data.bpc);
3589 }
Todd Previteeb3394fa2015-04-18 00:04:19 -07003590 } else
3591 seq_puts(m, "0");
3592 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003593 drm_connector_list_iter_end(&conn_iter);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003594
3595 return 0;
3596}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003597DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003598
3599static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3600{
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003601 struct drm_i915_private *dev_priv = m->private;
3602 struct drm_device *dev = &dev_priv->drm;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003603 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003604 struct drm_connector_list_iter conn_iter;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003605 struct intel_dp *intel_dp;
3606
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003607 drm_connector_list_iter_begin(dev, &conn_iter);
3608 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003609 struct intel_encoder *encoder;
3610
Todd Previteeb3394fa2015-04-18 00:04:19 -07003611 if (connector->connector_type !=
3612 DRM_MODE_CONNECTOR_DisplayPort)
3613 continue;
3614
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003615 encoder = to_intel_encoder(connector->encoder);
3616 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3617 continue;
3618
3619 if (encoder && connector->status == connector_status_connected) {
3620 intel_dp = enc_to_intel_dp(&encoder->base);
Manasi Navarec1617ab2016-12-09 16:22:50 -08003621 seq_printf(m, "%02lx", intel_dp->compliance.test_type);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003622 } else
3623 seq_puts(m, "0");
3624 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003625 drm_connector_list_iter_end(&conn_iter);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003626
3627 return 0;
3628}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003629DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003630
Jani Nikulae5315212019-01-16 11:15:23 +02003631static void wm_latency_show(struct seq_file *m, const u16 wm[8])
Ville Syrjälä369a1342014-01-22 14:36:08 +02003632{
David Weinehall36cdd012016-08-22 13:59:31 +03003633 struct drm_i915_private *dev_priv = m->private;
3634 struct drm_device *dev = &dev_priv->drm;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003635 int level;
Ville Syrjäläde38b952015-06-24 22:00:09 +03003636 int num_levels;
3637
David Weinehall36cdd012016-08-22 13:59:31 +03003638 if (IS_CHERRYVIEW(dev_priv))
Ville Syrjäläde38b952015-06-24 22:00:09 +03003639 num_levels = 3;
David Weinehall36cdd012016-08-22 13:59:31 +03003640 else if (IS_VALLEYVIEW(dev_priv))
Ville Syrjäläde38b952015-06-24 22:00:09 +03003641 num_levels = 1;
Ville Syrjälä04548cb2017-04-21 21:14:29 +03003642 else if (IS_G4X(dev_priv))
3643 num_levels = 3;
Ville Syrjäläde38b952015-06-24 22:00:09 +03003644 else
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01003645 num_levels = ilk_wm_max_level(dev_priv) + 1;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003646
3647 drm_modeset_lock_all(dev);
3648
3649 for (level = 0; level < num_levels; level++) {
3650 unsigned int latency = wm[level];
3651
Damien Lespiau97e94b22014-11-04 17:06:50 +00003652 /*
3653 * - WM1+ latency values in 0.5us units
Ville Syrjäläde38b952015-06-24 22:00:09 +03003654 * - latencies are in us on gen9/vlv/chv
Damien Lespiau97e94b22014-11-04 17:06:50 +00003655 */
Ville Syrjälä04548cb2017-04-21 21:14:29 +03003656 if (INTEL_GEN(dev_priv) >= 9 ||
3657 IS_VALLEYVIEW(dev_priv) ||
3658 IS_CHERRYVIEW(dev_priv) ||
3659 IS_G4X(dev_priv))
Damien Lespiau97e94b22014-11-04 17:06:50 +00003660 latency *= 10;
3661 else if (level > 0)
Ville Syrjälä369a1342014-01-22 14:36:08 +02003662 latency *= 5;
3663
3664 seq_printf(m, "WM%d %u (%u.%u usec)\n",
Damien Lespiau97e94b22014-11-04 17:06:50 +00003665 level, wm[level], latency / 10, latency % 10);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003666 }
3667
3668 drm_modeset_unlock_all(dev);
3669}
3670
3671static int pri_wm_latency_show(struct seq_file *m, void *data)
3672{
David Weinehall36cdd012016-08-22 13:59:31 +03003673 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003674 const u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003675
David Weinehall36cdd012016-08-22 13:59:31 +03003676 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003677 latencies = dev_priv->wm.skl_latency;
3678 else
David Weinehall36cdd012016-08-22 13:59:31 +03003679 latencies = dev_priv->wm.pri_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003680
3681 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003682
3683 return 0;
3684}
3685
3686static int spr_wm_latency_show(struct seq_file *m, void *data)
3687{
David Weinehall36cdd012016-08-22 13:59:31 +03003688 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003689 const u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003690
David Weinehall36cdd012016-08-22 13:59:31 +03003691 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003692 latencies = dev_priv->wm.skl_latency;
3693 else
David Weinehall36cdd012016-08-22 13:59:31 +03003694 latencies = dev_priv->wm.spr_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003695
3696 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003697
3698 return 0;
3699}
3700
3701static int cur_wm_latency_show(struct seq_file *m, void *data)
3702{
David Weinehall36cdd012016-08-22 13:59:31 +03003703 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003704 const u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003705
David Weinehall36cdd012016-08-22 13:59:31 +03003706 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003707 latencies = dev_priv->wm.skl_latency;
3708 else
David Weinehall36cdd012016-08-22 13:59:31 +03003709 latencies = dev_priv->wm.cur_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003710
3711 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003712
3713 return 0;
3714}
3715
3716static int pri_wm_latency_open(struct inode *inode, struct file *file)
3717{
David Weinehall36cdd012016-08-22 13:59:31 +03003718 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003719
Ville Syrjälä04548cb2017-04-21 21:14:29 +03003720 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003721 return -ENODEV;
3722
David Weinehall36cdd012016-08-22 13:59:31 +03003723 return single_open(file, pri_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003724}
3725
3726static int spr_wm_latency_open(struct inode *inode, struct file *file)
3727{
David Weinehall36cdd012016-08-22 13:59:31 +03003728 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003729
David Weinehall36cdd012016-08-22 13:59:31 +03003730 if (HAS_GMCH_DISPLAY(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003731 return -ENODEV;
3732
David Weinehall36cdd012016-08-22 13:59:31 +03003733 return single_open(file, spr_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003734}
3735
3736static int cur_wm_latency_open(struct inode *inode, struct file *file)
3737{
David Weinehall36cdd012016-08-22 13:59:31 +03003738 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003739
David Weinehall36cdd012016-08-22 13:59:31 +03003740 if (HAS_GMCH_DISPLAY(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003741 return -ENODEV;
3742
David Weinehall36cdd012016-08-22 13:59:31 +03003743 return single_open(file, cur_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003744}
3745
3746static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
Jani Nikulae5315212019-01-16 11:15:23 +02003747 size_t len, loff_t *offp, u16 wm[8])
Ville Syrjälä369a1342014-01-22 14:36:08 +02003748{
3749 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003750 struct drm_i915_private *dev_priv = m->private;
3751 struct drm_device *dev = &dev_priv->drm;
Jani Nikulae5315212019-01-16 11:15:23 +02003752 u16 new[8] = { 0 };
Ville Syrjäläde38b952015-06-24 22:00:09 +03003753 int num_levels;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003754 int level;
3755 int ret;
3756 char tmp[32];
3757
David Weinehall36cdd012016-08-22 13:59:31 +03003758 if (IS_CHERRYVIEW(dev_priv))
Ville Syrjäläde38b952015-06-24 22:00:09 +03003759 num_levels = 3;
David Weinehall36cdd012016-08-22 13:59:31 +03003760 else if (IS_VALLEYVIEW(dev_priv))
Ville Syrjäläde38b952015-06-24 22:00:09 +03003761 num_levels = 1;
Ville Syrjälä04548cb2017-04-21 21:14:29 +03003762 else if (IS_G4X(dev_priv))
3763 num_levels = 3;
Ville Syrjäläde38b952015-06-24 22:00:09 +03003764 else
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01003765 num_levels = ilk_wm_max_level(dev_priv) + 1;
Ville Syrjäläde38b952015-06-24 22:00:09 +03003766
Ville Syrjälä369a1342014-01-22 14:36:08 +02003767 if (len >= sizeof(tmp))
3768 return -EINVAL;
3769
3770 if (copy_from_user(tmp, ubuf, len))
3771 return -EFAULT;
3772
3773 tmp[len] = '\0';
3774
Damien Lespiau97e94b22014-11-04 17:06:50 +00003775 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3776 &new[0], &new[1], &new[2], &new[3],
3777 &new[4], &new[5], &new[6], &new[7]);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003778 if (ret != num_levels)
3779 return -EINVAL;
3780
3781 drm_modeset_lock_all(dev);
3782
3783 for (level = 0; level < num_levels; level++)
3784 wm[level] = new[level];
3785
3786 drm_modeset_unlock_all(dev);
3787
3788 return len;
3789}
3790
3791
3792static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3793 size_t len, loff_t *offp)
3794{
3795 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003796 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003797 u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003798
David Weinehall36cdd012016-08-22 13:59:31 +03003799 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003800 latencies = dev_priv->wm.skl_latency;
3801 else
David Weinehall36cdd012016-08-22 13:59:31 +03003802 latencies = dev_priv->wm.pri_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003803
3804 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003805}
3806
3807static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3808 size_t len, loff_t *offp)
3809{
3810 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003811 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003812 u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003813
David Weinehall36cdd012016-08-22 13:59:31 +03003814 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003815 latencies = dev_priv->wm.skl_latency;
3816 else
David Weinehall36cdd012016-08-22 13:59:31 +03003817 latencies = dev_priv->wm.spr_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003818
3819 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003820}
3821
3822static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3823 size_t len, loff_t *offp)
3824{
3825 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003826 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003827 u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003828
David Weinehall36cdd012016-08-22 13:59:31 +03003829 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003830 latencies = dev_priv->wm.skl_latency;
3831 else
David Weinehall36cdd012016-08-22 13:59:31 +03003832 latencies = dev_priv->wm.cur_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003833
3834 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003835}
3836
3837static const struct file_operations i915_pri_wm_latency_fops = {
3838 .owner = THIS_MODULE,
3839 .open = pri_wm_latency_open,
3840 .read = seq_read,
3841 .llseek = seq_lseek,
3842 .release = single_release,
3843 .write = pri_wm_latency_write
3844};
3845
3846static const struct file_operations i915_spr_wm_latency_fops = {
3847 .owner = THIS_MODULE,
3848 .open = spr_wm_latency_open,
3849 .read = seq_read,
3850 .llseek = seq_lseek,
3851 .release = single_release,
3852 .write = spr_wm_latency_write
3853};
3854
3855static const struct file_operations i915_cur_wm_latency_fops = {
3856 .owner = THIS_MODULE,
3857 .open = cur_wm_latency_open,
3858 .read = seq_read,
3859 .llseek = seq_lseek,
3860 .release = single_release,
3861 .write = cur_wm_latency_write
3862};
3863
Kees Cook647416f2013-03-10 14:10:06 -07003864static int
3865i915_wedged_get(void *data, u64 *val)
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003866{
David Weinehall36cdd012016-08-22 13:59:31 +03003867 struct drm_i915_private *dev_priv = data;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003868
Chris Wilsond98c52c2016-04-13 17:35:05 +01003869 *val = i915_terminally_wedged(&dev_priv->gpu_error);
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003870
Kees Cook647416f2013-03-10 14:10:06 -07003871 return 0;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003872}
3873
Kees Cook647416f2013-03-10 14:10:06 -07003874static int
3875i915_wedged_set(void *data, u64 val)
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003876{
Chris Wilson598b6b52017-03-25 13:47:35 +00003877 struct drm_i915_private *i915 = data;
Imre Deakd46c0512014-04-14 20:24:27 +03003878
Mika Kuoppalab8d24a02015-01-28 17:03:14 +02003879 /*
3880 * There is no safeguard against this debugfs entry colliding
3881 * with the hangcheck calling same i915_handle_error() in
3882 * parallel, causing an explosion. For now we assume that the
3883 * test harness is responsible enough not to inject gpu hangs
3884 * while it is writing to 'i915_wedged'
3885 */
3886
Chris Wilson598b6b52017-03-25 13:47:35 +00003887 if (i915_reset_backoff(&i915->gpu_error))
Mika Kuoppalab8d24a02015-01-28 17:03:14 +02003888 return -EAGAIN;
3889
Chris Wilsonce800752018-03-20 10:04:49 +00003890 i915_handle_error(i915, val, I915_ERROR_CAPTURE,
3891 "Manually set wedged engine mask = %llx", val);
Kees Cook647416f2013-03-10 14:10:06 -07003892 return 0;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003893}
3894
Kees Cook647416f2013-03-10 14:10:06 -07003895DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
3896 i915_wedged_get, i915_wedged_set,
Mika Kuoppala3a3b4f92013-04-12 12:10:05 +03003897 "%llu\n");
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003898
Chris Wilsonb4a0b322017-10-18 13:16:21 +01003899#define DROP_UNBOUND BIT(0)
3900#define DROP_BOUND BIT(1)
3901#define DROP_RETIRE BIT(2)
3902#define DROP_ACTIVE BIT(3)
3903#define DROP_FREED BIT(4)
3904#define DROP_SHRINK_ALL BIT(5)
3905#define DROP_IDLE BIT(6)
Chris Wilson6b048702018-09-03 09:33:37 +01003906#define DROP_RESET_ACTIVE BIT(7)
3907#define DROP_RESET_SEQNO BIT(8)
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003908#define DROP_ALL (DROP_UNBOUND | \
3909 DROP_BOUND | \
3910 DROP_RETIRE | \
3911 DROP_ACTIVE | \
Chris Wilson8eadc192017-03-08 14:46:22 +00003912 DROP_FREED | \
Chris Wilsonb4a0b322017-10-18 13:16:21 +01003913 DROP_SHRINK_ALL |\
Chris Wilson6b048702018-09-03 09:33:37 +01003914 DROP_IDLE | \
3915 DROP_RESET_ACTIVE | \
3916 DROP_RESET_SEQNO)
Kees Cook647416f2013-03-10 14:10:06 -07003917static int
3918i915_drop_caches_get(void *data, u64 *val)
Chris Wilsondd624af2013-01-15 12:39:35 +00003919{
Kees Cook647416f2013-03-10 14:10:06 -07003920 *val = DROP_ALL;
Chris Wilsondd624af2013-01-15 12:39:35 +00003921
Kees Cook647416f2013-03-10 14:10:06 -07003922 return 0;
Chris Wilsondd624af2013-01-15 12:39:35 +00003923}
3924
Kees Cook647416f2013-03-10 14:10:06 -07003925static int
3926i915_drop_caches_set(void *data, u64 val)
Chris Wilsondd624af2013-01-15 12:39:35 +00003927{
Chris Wilson6b048702018-09-03 09:33:37 +01003928 struct drm_i915_private *i915 = data;
Chris Wilsona0371212019-01-14 14:21:14 +00003929 intel_wakeref_t wakeref;
Chris Wilson00c26cf2017-05-24 17:26:53 +01003930 int ret = 0;
Chris Wilsondd624af2013-01-15 12:39:35 +00003931
Chris Wilsonb4a0b322017-10-18 13:16:21 +01003932 DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
3933 val, val & DROP_ALL);
Chris Wilsona0371212019-01-14 14:21:14 +00003934 wakeref = intel_runtime_pm_get(i915);
Chris Wilsondd624af2013-01-15 12:39:35 +00003935
Chris Wilsonad4062d2019-01-28 01:02:18 +00003936 if (val & DROP_RESET_ACTIVE &&
3937 wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT))
Chris Wilson6b048702018-09-03 09:33:37 +01003938 i915_gem_set_wedged(i915);
3939
Chris Wilsondd624af2013-01-15 12:39:35 +00003940 /* No need to check and wait for gpu resets, only libdrm auto-restarts
3941 * on ioctls on -EAGAIN. */
Chris Wilson6b048702018-09-03 09:33:37 +01003942 if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
3943 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
Chris Wilsondd624af2013-01-15 12:39:35 +00003944 if (ret)
Joonas Lahtinen198a2a22018-10-18 12:20:25 +03003945 goto out;
Chris Wilsondd624af2013-01-15 12:39:35 +00003946
Chris Wilson00c26cf2017-05-24 17:26:53 +01003947 if (val & DROP_ACTIVE)
Chris Wilson6b048702018-09-03 09:33:37 +01003948 ret = i915_gem_wait_for_idle(i915,
Chris Wilson00c26cf2017-05-24 17:26:53 +01003949 I915_WAIT_INTERRUPTIBLE |
Chris Wilsonec625fb2018-07-09 13:20:42 +01003950 I915_WAIT_LOCKED,
3951 MAX_SCHEDULE_TIMEOUT);
Chris Wilson00c26cf2017-05-24 17:26:53 +01003952
Chris Wilson6b048702018-09-03 09:33:37 +01003953 if (val & DROP_RETIRE)
3954 i915_retire_requests(i915);
3955
3956 mutex_unlock(&i915->drm.struct_mutex);
3957 }
3958
Chris Wilsoneb8d0f52019-01-25 13:22:28 +00003959 if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(&i915->gpu_error))
Chris Wilson6b048702018-09-03 09:33:37 +01003960 i915_handle_error(i915, ALL_ENGINES, 0, NULL);
Chris Wilsondd624af2013-01-15 12:39:35 +00003961
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01003962 fs_reclaim_acquire(GFP_KERNEL);
Chris Wilson21ab4e72014-09-09 11:16:08 +01003963 if (val & DROP_BOUND)
Chris Wilson6b048702018-09-03 09:33:37 +01003964 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
Chris Wilson4ad72b72014-09-03 19:23:37 +01003965
Chris Wilson21ab4e72014-09-09 11:16:08 +01003966 if (val & DROP_UNBOUND)
Chris Wilson6b048702018-09-03 09:33:37 +01003967 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
Chris Wilsondd624af2013-01-15 12:39:35 +00003968
Chris Wilson8eadc192017-03-08 14:46:22 +00003969 if (val & DROP_SHRINK_ALL)
Chris Wilson6b048702018-09-03 09:33:37 +01003970 i915_gem_shrink_all(i915);
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01003971 fs_reclaim_release(GFP_KERNEL);
Chris Wilson8eadc192017-03-08 14:46:22 +00003972
Chris Wilson4dfacb02018-05-31 09:22:43 +01003973 if (val & DROP_IDLE) {
3974 do {
Chris Wilson6b048702018-09-03 09:33:37 +01003975 if (READ_ONCE(i915->gt.active_requests))
3976 flush_delayed_work(&i915->gt.retire_work);
3977 drain_delayed_work(&i915->gt.idle_work);
3978 } while (READ_ONCE(i915->gt.awake));
Chris Wilson4dfacb02018-05-31 09:22:43 +01003979 }
Chris Wilsonb4a0b322017-10-18 13:16:21 +01003980
Chris Wilsonc9c704712018-02-19 22:06:31 +00003981 if (val & DROP_FREED)
Chris Wilson6b048702018-09-03 09:33:37 +01003982 i915_gem_drain_freed_objects(i915);
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003983
Joonas Lahtinen198a2a22018-10-18 12:20:25 +03003984out:
Chris Wilsona0371212019-01-14 14:21:14 +00003985 intel_runtime_pm_put(i915, wakeref);
Chris Wilson9d3eb2c2018-10-15 12:58:56 +01003986
Kees Cook647416f2013-03-10 14:10:06 -07003987 return ret;
Chris Wilsondd624af2013-01-15 12:39:35 +00003988}
3989
Kees Cook647416f2013-03-10 14:10:06 -07003990DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
3991 i915_drop_caches_get, i915_drop_caches_set,
3992 "0x%08llx\n");
Chris Wilsondd624af2013-01-15 12:39:35 +00003993
Kees Cook647416f2013-03-10 14:10:06 -07003994static int
Kees Cook647416f2013-03-10 14:10:06 -07003995i915_cache_sharing_get(void *data, u64 *val)
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07003996{
David Weinehall36cdd012016-08-22 13:59:31 +03003997 struct drm_i915_private *dev_priv = data;
Chris Wilsona0371212019-01-14 14:21:14 +00003998 intel_wakeref_t wakeref;
Chris Wilsond4225a52019-01-14 14:21:23 +00003999 u32 snpcr = 0;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004000
Lucas De Marchif3ce44a2018-12-12 10:10:44 -08004001 if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
Daniel Vetter004777c2012-08-09 15:07:01 +02004002 return -ENODEV;
4003
Chris Wilsond4225a52019-01-14 14:21:23 +00004004 with_intel_runtime_pm(dev_priv, wakeref)
4005 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004006
Kees Cook647416f2013-03-10 14:10:06 -07004007 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004008
Kees Cook647416f2013-03-10 14:10:06 -07004009 return 0;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004010}
4011
Kees Cook647416f2013-03-10 14:10:06 -07004012static int
4013i915_cache_sharing_set(void *data, u64 val)
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004014{
David Weinehall36cdd012016-08-22 13:59:31 +03004015 struct drm_i915_private *dev_priv = data;
Chris Wilsona0371212019-01-14 14:21:14 +00004016 intel_wakeref_t wakeref;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004017
Lucas De Marchif3ce44a2018-12-12 10:10:44 -08004018 if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
Daniel Vetter004777c2012-08-09 15:07:01 +02004019 return -ENODEV;
4020
Kees Cook647416f2013-03-10 14:10:06 -07004021 if (val > 3)
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004022 return -EINVAL;
4023
Kees Cook647416f2013-03-10 14:10:06 -07004024 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
Chris Wilsond4225a52019-01-14 14:21:23 +00004025 with_intel_runtime_pm(dev_priv, wakeref) {
4026 u32 snpcr;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004027
Chris Wilsond4225a52019-01-14 14:21:23 +00004028 /* Update the cache sharing policy here as well */
4029 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4030 snpcr &= ~GEN6_MBC_SNPCR_MASK;
4031 snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
4032 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
4033 }
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004034
Kees Cook647416f2013-03-10 14:10:06 -07004035 return 0;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004036}
4037
Kees Cook647416f2013-03-10 14:10:06 -07004038DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
4039 i915_cache_sharing_get, i915_cache_sharing_set,
4040 "%llu\n");
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004041
David Weinehall36cdd012016-08-22 13:59:31 +03004042static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
Imre Deak915490d2016-08-31 19:13:01 +03004043 struct sseu_dev_info *sseu)
Jeff McGee5d395252015-04-03 18:13:17 -07004044{
Chris Wilson7aa0b142018-03-13 00:40:54 +00004045#define SS_MAX 2
4046 const int ss_max = SS_MAX;
4047 u32 sig1[SS_MAX], sig2[SS_MAX];
Jeff McGee5d395252015-04-03 18:13:17 -07004048 int ss;
Jeff McGee5d395252015-04-03 18:13:17 -07004049
4050 sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
4051 sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
4052 sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
4053 sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
4054
4055 for (ss = 0; ss < ss_max; ss++) {
4056 unsigned int eu_cnt;
4057
4058 if (sig1[ss] & CHV_SS_PG_ENABLE)
4059 /* skip disabled subslice */
4060 continue;
4061
Imre Deakf08a0c92016-08-31 19:13:04 +03004062 sseu->slice_mask = BIT(0);
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004063 sseu->subslice_mask[0] |= BIT(ss);
Jeff McGee5d395252015-04-03 18:13:17 -07004064 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
4065 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
4066 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
4067 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
Imre Deak915490d2016-08-31 19:13:01 +03004068 sseu->eu_total += eu_cnt;
4069 sseu->eu_per_subslice = max_t(unsigned int,
4070 sseu->eu_per_subslice, eu_cnt);
Jeff McGee5d395252015-04-03 18:13:17 -07004071 }
Chris Wilson7aa0b142018-03-13 00:40:54 +00004072#undef SS_MAX
Jeff McGee5d395252015-04-03 18:13:17 -07004073}
4074
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004075static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
4076 struct sseu_dev_info *sseu)
4077{
Chris Wilsonc7fb3c62018-03-13 11:31:49 +00004078#define SS_MAX 6
Jani Nikula02584042018-12-31 16:56:41 +02004079 const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
Chris Wilsonc7fb3c62018-03-13 11:31:49 +00004080 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004081 int s, ss;
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004082
Lionel Landwerlinb3e7f862018-03-06 12:28:53 +00004083 for (s = 0; s < info->sseu.max_slices; s++) {
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004084 /*
4085 * FIXME: Valid SS Mask respects the spec and read
Alexandre Belloni3c64ea82018-11-20 16:14:15 +01004086 * only valid bits for those registers, excluding reserved
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004087 * although this seems wrong because it would leave many
4088 * subslices without ACK.
4089 */
4090 s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
4091 GEN10_PGCTL_VALID_SS_MASK(s);
4092 eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
4093 eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
4094 }
4095
4096 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4097 GEN9_PGCTL_SSA_EU19_ACK |
4098 GEN9_PGCTL_SSA_EU210_ACK |
4099 GEN9_PGCTL_SSA_EU311_ACK;
4100 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4101 GEN9_PGCTL_SSB_EU19_ACK |
4102 GEN9_PGCTL_SSB_EU210_ACK |
4103 GEN9_PGCTL_SSB_EU311_ACK;
4104
Lionel Landwerlinb3e7f862018-03-06 12:28:53 +00004105 for (s = 0; s < info->sseu.max_slices; s++) {
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004106 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4107 /* skip disabled slice */
4108 continue;
4109
4110 sseu->slice_mask |= BIT(s);
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004111 sseu->subslice_mask[s] = info->sseu.subslice_mask[s];
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004112
Lionel Landwerlinb3e7f862018-03-06 12:28:53 +00004113 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004114 unsigned int eu_cnt;
4115
4116 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4117 /* skip disabled subslice */
4118 continue;
4119
4120 eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
4121 eu_mask[ss % 2]);
4122 sseu->eu_total += eu_cnt;
4123 sseu->eu_per_subslice = max_t(unsigned int,
4124 sseu->eu_per_subslice,
4125 eu_cnt);
4126 }
4127 }
Chris Wilsonc7fb3c62018-03-13 11:31:49 +00004128#undef SS_MAX
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004129}
4130
David Weinehall36cdd012016-08-22 13:59:31 +03004131static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
Imre Deak915490d2016-08-31 19:13:01 +03004132 struct sseu_dev_info *sseu)
Jeff McGee5d395252015-04-03 18:13:17 -07004133{
Chris Wilsonc7fb3c62018-03-13 11:31:49 +00004134#define SS_MAX 3
Jani Nikula02584042018-12-31 16:56:41 +02004135 const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
Chris Wilsonc7fb3c62018-03-13 11:31:49 +00004136 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
Jeff McGee5d395252015-04-03 18:13:17 -07004137 int s, ss;
Jeff McGee5d395252015-04-03 18:13:17 -07004138
Lionel Landwerlinb3e7f862018-03-06 12:28:53 +00004139 for (s = 0; s < info->sseu.max_slices; s++) {
Jeff McGee1c046bc2015-04-03 18:13:18 -07004140 s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
4141 eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
4142 eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
4143 }
4144
Jeff McGee5d395252015-04-03 18:13:17 -07004145 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4146 GEN9_PGCTL_SSA_EU19_ACK |
4147 GEN9_PGCTL_SSA_EU210_ACK |
4148 GEN9_PGCTL_SSA_EU311_ACK;
4149 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4150 GEN9_PGCTL_SSB_EU19_ACK |
4151 GEN9_PGCTL_SSB_EU210_ACK |
4152 GEN9_PGCTL_SSB_EU311_ACK;
4153
Lionel Landwerlinb3e7f862018-03-06 12:28:53 +00004154 for (s = 0; s < info->sseu.max_slices; s++) {
Jeff McGee5d395252015-04-03 18:13:17 -07004155 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4156 /* skip disabled slice */
4157 continue;
4158
Imre Deakf08a0c92016-08-31 19:13:04 +03004159 sseu->slice_mask |= BIT(s);
Jeff McGee1c046bc2015-04-03 18:13:18 -07004160
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004161 if (IS_GEN9_BC(dev_priv))
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004162 sseu->subslice_mask[s] =
Jani Nikula02584042018-12-31 16:56:41 +02004163 RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
Jeff McGee1c046bc2015-04-03 18:13:18 -07004164
Lionel Landwerlinb3e7f862018-03-06 12:28:53 +00004165 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
Jeff McGee5d395252015-04-03 18:13:17 -07004166 unsigned int eu_cnt;
4167
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02004168 if (IS_GEN9_LP(dev_priv)) {
Imre Deak57ec1712016-08-31 19:13:05 +03004169 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4170 /* skip disabled subslice */
4171 continue;
Jeff McGee1c046bc2015-04-03 18:13:18 -07004172
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004173 sseu->subslice_mask[s] |= BIT(ss);
Imre Deak57ec1712016-08-31 19:13:05 +03004174 }
Jeff McGee1c046bc2015-04-03 18:13:18 -07004175
Jeff McGee5d395252015-04-03 18:13:17 -07004176 eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
4177 eu_mask[ss%2]);
Imre Deak915490d2016-08-31 19:13:01 +03004178 sseu->eu_total += eu_cnt;
4179 sseu->eu_per_subslice = max_t(unsigned int,
4180 sseu->eu_per_subslice,
4181 eu_cnt);
Jeff McGee5d395252015-04-03 18:13:17 -07004182 }
4183 }
Chris Wilsonc7fb3c62018-03-13 11:31:49 +00004184#undef SS_MAX
Jeff McGee5d395252015-04-03 18:13:17 -07004185}
4186
David Weinehall36cdd012016-08-22 13:59:31 +03004187static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
Imre Deak915490d2016-08-31 19:13:01 +03004188 struct sseu_dev_info *sseu)
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004189{
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004190 u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
David Weinehall36cdd012016-08-22 13:59:31 +03004191 int s;
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004192
Imre Deakf08a0c92016-08-31 19:13:04 +03004193 sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004194
Imre Deakf08a0c92016-08-31 19:13:04 +03004195 if (sseu->slice_mask) {
Imre Deak43b67992016-08-31 19:13:02 +03004196 sseu->eu_per_subslice =
Jani Nikula02584042018-12-31 16:56:41 +02004197 RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004198 for (s = 0; s < fls(sseu->slice_mask); s++) {
4199 sseu->subslice_mask[s] =
Jani Nikula02584042018-12-31 16:56:41 +02004200 RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004201 }
Imre Deak57ec1712016-08-31 19:13:05 +03004202 sseu->eu_total = sseu->eu_per_subslice *
4203 sseu_subslice_total(sseu);
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004204
4205 /* subtract fused off EU(s) from enabled slice(s) */
Imre Deak795b38b2016-08-31 19:13:07 +03004206 for (s = 0; s < fls(sseu->slice_mask); s++) {
Imre Deak43b67992016-08-31 19:13:02 +03004207 u8 subslice_7eu =
Jani Nikula02584042018-12-31 16:56:41 +02004208 RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004209
Imre Deak915490d2016-08-31 19:13:01 +03004210 sseu->eu_total -= hweight8(subslice_7eu);
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004211 }
4212 }
4213}
4214
Imre Deak615d8902016-08-31 19:13:03 +03004215static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
4216 const struct sseu_dev_info *sseu)
4217{
4218 struct drm_i915_private *dev_priv = node_to_i915(m->private);
4219 const char *type = is_available_info ? "Available" : "Enabled";
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004220 int s;
Imre Deak615d8902016-08-31 19:13:03 +03004221
Imre Deakc67ba532016-08-31 19:13:06 +03004222 seq_printf(m, " %s Slice Mask: %04x\n", type,
4223 sseu->slice_mask);
Imre Deak615d8902016-08-31 19:13:03 +03004224 seq_printf(m, " %s Slice Total: %u\n", type,
Imre Deakf08a0c92016-08-31 19:13:04 +03004225 hweight8(sseu->slice_mask));
Imre Deak615d8902016-08-31 19:13:03 +03004226 seq_printf(m, " %s Subslice Total: %u\n", type,
Imre Deak57ec1712016-08-31 19:13:05 +03004227 sseu_subslice_total(sseu));
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004228 for (s = 0; s < fls(sseu->slice_mask); s++) {
4229 seq_printf(m, " %s Slice%i subslices: %u\n", type,
4230 s, hweight8(sseu->subslice_mask[s]));
4231 }
Imre Deak615d8902016-08-31 19:13:03 +03004232 seq_printf(m, " %s EU Total: %u\n", type,
4233 sseu->eu_total);
4234 seq_printf(m, " %s EU Per Subslice: %u\n", type,
4235 sseu->eu_per_subslice);
4236
4237 if (!is_available_info)
4238 return;
4239
4240 seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
4241 if (HAS_POOLED_EU(dev_priv))
4242 seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);
4243
4244 seq_printf(m, " Has Slice Power Gating: %s\n",
4245 yesno(sseu->has_slice_pg));
4246 seq_printf(m, " Has Subslice Power Gating: %s\n",
4247 yesno(sseu->has_subslice_pg));
4248 seq_printf(m, " Has EU Power Gating: %s\n",
4249 yesno(sseu->has_eu_pg));
4250}
4251
Jeff McGee38732182015-02-13 10:27:54 -06004252static int i915_sseu_status(struct seq_file *m, void *unused)
4253{
David Weinehall36cdd012016-08-22 13:59:31 +03004254 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Imre Deak915490d2016-08-31 19:13:01 +03004255 struct sseu_dev_info sseu;
Chris Wilsona0371212019-01-14 14:21:14 +00004256 intel_wakeref_t wakeref;
Jeff McGee38732182015-02-13 10:27:54 -06004257
David Weinehall36cdd012016-08-22 13:59:31 +03004258 if (INTEL_GEN(dev_priv) < 8)
Jeff McGee38732182015-02-13 10:27:54 -06004259 return -ENODEV;
4260
4261 seq_puts(m, "SSEU Device Info\n");
Jani Nikula02584042018-12-31 16:56:41 +02004262 i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);
Jeff McGee38732182015-02-13 10:27:54 -06004263
Jeff McGee7f992ab2015-02-13 10:27:55 -06004264 seq_puts(m, "SSEU Device Status\n");
Imre Deak915490d2016-08-31 19:13:01 +03004265 memset(&sseu, 0, sizeof(sseu));
Jani Nikula02584042018-12-31 16:56:41 +02004266 sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
4267 sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004268 sseu.max_eus_per_subslice =
Jani Nikula02584042018-12-31 16:56:41 +02004269 RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;
David Weinehall238010e2016-08-01 17:33:27 +03004270
Chris Wilsond4225a52019-01-14 14:21:23 +00004271 with_intel_runtime_pm(dev_priv, wakeref) {
4272 if (IS_CHERRYVIEW(dev_priv))
4273 cherryview_sseu_device_status(dev_priv, &sseu);
4274 else if (IS_BROADWELL(dev_priv))
4275 broadwell_sseu_device_status(dev_priv, &sseu);
4276 else if (IS_GEN(dev_priv, 9))
4277 gen9_sseu_device_status(dev_priv, &sseu);
4278 else if (INTEL_GEN(dev_priv) >= 10)
4279 gen10_sseu_device_status(dev_priv, &sseu);
Jeff McGee7f992ab2015-02-13 10:27:55 -06004280 }
David Weinehall238010e2016-08-01 17:33:27 +03004281
Imre Deak615d8902016-08-31 19:13:03 +03004282 i915_print_sseu_info(m, false, &sseu);
Jeff McGee7f992ab2015-02-13 10:27:55 -06004283
Jeff McGee38732182015-02-13 10:27:54 -06004284 return 0;
4285}
4286
Ben Widawsky6d794d42011-04-25 11:25:56 -07004287static int i915_forcewake_open(struct inode *inode, struct file *file)
4288{
Chris Wilsond7a133d2017-09-07 14:44:41 +01004289 struct drm_i915_private *i915 = inode->i_private;
Ben Widawsky6d794d42011-04-25 11:25:56 -07004290
Chris Wilsond7a133d2017-09-07 14:44:41 +01004291 if (INTEL_GEN(i915) < 6)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004292 return 0;
4293
Tvrtko Ursulin6ddbb12e2019-01-17 14:48:31 +00004294 file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915);
Chris Wilsond7a133d2017-09-07 14:44:41 +01004295 intel_uncore_forcewake_user_get(i915);
Ben Widawsky6d794d42011-04-25 11:25:56 -07004296
4297 return 0;
4298}
4299
Ben Widawskyc43b5632012-04-16 14:07:40 -07004300static int i915_forcewake_release(struct inode *inode, struct file *file)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004301{
Chris Wilsond7a133d2017-09-07 14:44:41 +01004302 struct drm_i915_private *i915 = inode->i_private;
Ben Widawsky6d794d42011-04-25 11:25:56 -07004303
Chris Wilsond7a133d2017-09-07 14:44:41 +01004304 if (INTEL_GEN(i915) < 6)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004305 return 0;
4306
Chris Wilsond7a133d2017-09-07 14:44:41 +01004307 intel_uncore_forcewake_user_put(i915);
Tvrtko Ursulin6ddbb12e2019-01-17 14:48:31 +00004308 intel_runtime_pm_put(i915,
4309 (intel_wakeref_t)(uintptr_t)file->private_data);
Ben Widawsky6d794d42011-04-25 11:25:56 -07004310
4311 return 0;
4312}
4313
4314static const struct file_operations i915_forcewake_fops = {
4315 .owner = THIS_MODULE,
4316 .open = i915_forcewake_open,
4317 .release = i915_forcewake_release,
4318};
4319
Lyude317eaa92017-02-03 21:18:25 -05004320static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4321{
4322 struct drm_i915_private *dev_priv = m->private;
4323 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4324
Lyude Paul6fc5d782018-11-20 19:37:17 -05004325 /* Synchronize with everything first in case there's been an HPD
4326 * storm, but we haven't finished handling it in the kernel yet
4327 */
4328 synchronize_irq(dev_priv->drm.irq);
4329 flush_work(&dev_priv->hotplug.dig_port_work);
4330 flush_work(&dev_priv->hotplug.hotplug_work);
4331
Lyude317eaa92017-02-03 21:18:25 -05004332 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4333 seq_printf(m, "Detected: %s\n",
4334 yesno(delayed_work_pending(&hotplug->reenable_work)));
4335
4336 return 0;
4337}
4338
4339static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4340 const char __user *ubuf, size_t len,
4341 loff_t *offp)
4342{
4343 struct seq_file *m = file->private_data;
4344 struct drm_i915_private *dev_priv = m->private;
4345 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4346 unsigned int new_threshold;
4347 int i;
4348 char *newline;
4349 char tmp[16];
4350
4351 if (len >= sizeof(tmp))
4352 return -EINVAL;
4353
4354 if (copy_from_user(tmp, ubuf, len))
4355 return -EFAULT;
4356
4357 tmp[len] = '\0';
4358
4359 /* Strip newline, if any */
4360 newline = strchr(tmp, '\n');
4361 if (newline)
4362 *newline = '\0';
4363
4364 if (strcmp(tmp, "reset") == 0)
4365 new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4366 else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4367 return -EINVAL;
4368
4369 if (new_threshold > 0)
4370 DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4371 new_threshold);
4372 else
4373 DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4374
4375 spin_lock_irq(&dev_priv->irq_lock);
4376 hotplug->hpd_storm_threshold = new_threshold;
4377 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4378 for_each_hpd_pin(i)
4379 hotplug->stats[i].count = 0;
4380 spin_unlock_irq(&dev_priv->irq_lock);
4381
4382 /* Re-enable hpd immediately if we were in an irq storm */
4383 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4384
4385 return len;
4386}
4387
4388static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
4389{
4390 return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
4391}
4392
4393static const struct file_operations i915_hpd_storm_ctl_fops = {
4394 .owner = THIS_MODULE,
4395 .open = i915_hpd_storm_ctl_open,
4396 .read = seq_read,
4397 .llseek = seq_lseek,
4398 .release = single_release,
4399 .write = i915_hpd_storm_ctl_write
4400};
4401
Lyude Paul9a64c652018-11-06 16:30:16 -05004402static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
4403{
4404 struct drm_i915_private *dev_priv = m->private;
4405
4406 seq_printf(m, "Enabled: %s\n",
4407 yesno(dev_priv->hotplug.hpd_short_storm_enabled));
4408
4409 return 0;
4410}
4411
4412static int
4413i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
4414{
4415 return single_open(file, i915_hpd_short_storm_ctl_show,
4416 inode->i_private);
4417}
4418
4419static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
4420 const char __user *ubuf,
4421 size_t len, loff_t *offp)
4422{
4423 struct seq_file *m = file->private_data;
4424 struct drm_i915_private *dev_priv = m->private;
4425 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4426 char *newline;
4427 char tmp[16];
4428 int i;
4429 bool new_state;
4430
4431 if (len >= sizeof(tmp))
4432 return -EINVAL;
4433
4434 if (copy_from_user(tmp, ubuf, len))
4435 return -EFAULT;
4436
4437 tmp[len] = '\0';
4438
4439 /* Strip newline, if any */
4440 newline = strchr(tmp, '\n');
4441 if (newline)
4442 *newline = '\0';
4443
4444 /* Reset to the "default" state for this system */
4445 if (strcmp(tmp, "reset") == 0)
4446 new_state = !HAS_DP_MST(dev_priv);
4447 else if (kstrtobool(tmp, &new_state) != 0)
4448 return -EINVAL;
4449
4450 DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
4451 new_state ? "En" : "Dis");
4452
4453 spin_lock_irq(&dev_priv->irq_lock);
4454 hotplug->hpd_short_storm_enabled = new_state;
4455 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4456 for_each_hpd_pin(i)
4457 hotplug->stats[i].count = 0;
4458 spin_unlock_irq(&dev_priv->irq_lock);
4459
4460 /* Re-enable hpd immediately if we were in an irq storm */
4461 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4462
4463 return len;
4464}
4465
4466static const struct file_operations i915_hpd_short_storm_ctl_fops = {
4467 .owner = THIS_MODULE,
4468 .open = i915_hpd_short_storm_ctl_open,
4469 .read = seq_read,
4470 .llseek = seq_lseek,
4471 .release = single_release,
4472 .write = i915_hpd_short_storm_ctl_write,
4473};
4474
C, Ramalingam35954e82017-11-08 00:08:23 +05304475static int i915_drrs_ctl_set(void *data, u64 val)
4476{
4477 struct drm_i915_private *dev_priv = data;
4478 struct drm_device *dev = &dev_priv->drm;
Maarten Lankhorst138bdac2018-10-11 12:04:48 +02004479 struct intel_crtc *crtc;
C, Ramalingam35954e82017-11-08 00:08:23 +05304480
4481 if (INTEL_GEN(dev_priv) < 7)
4482 return -ENODEV;
4483
Maarten Lankhorst138bdac2018-10-11 12:04:48 +02004484 for_each_intel_crtc(dev, crtc) {
4485 struct drm_connector_list_iter conn_iter;
4486 struct intel_crtc_state *crtc_state;
4487 struct drm_connector *connector;
4488 struct drm_crtc_commit *commit;
4489 int ret;
C, Ramalingam35954e82017-11-08 00:08:23 +05304490
Maarten Lankhorst138bdac2018-10-11 12:04:48 +02004491 ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
4492 if (ret)
4493 return ret;
4494
4495 crtc_state = to_intel_crtc_state(crtc->base.state);
4496
4497 if (!crtc_state->base.active ||
4498 !crtc_state->has_drrs)
4499 goto out;
4500
4501 commit = crtc_state->base.commit;
4502 if (commit) {
4503 ret = wait_for_completion_interruptible(&commit->hw_done);
4504 if (ret)
4505 goto out;
4506 }
4507
4508 drm_connector_list_iter_begin(dev, &conn_iter);
4509 drm_for_each_connector_iter(connector, &conn_iter) {
4510 struct intel_encoder *encoder;
4511 struct intel_dp *intel_dp;
4512
4513 if (!(crtc_state->base.connector_mask &
4514 drm_connector_mask(connector)))
4515 continue;
4516
4517 encoder = intel_attached_encoder(connector);
C, Ramalingam35954e82017-11-08 00:08:23 +05304518 if (encoder->type != INTEL_OUTPUT_EDP)
4519 continue;
4520
4521 DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
4522 val ? "en" : "dis", val);
4523
4524 intel_dp = enc_to_intel_dp(&encoder->base);
4525 if (val)
4526 intel_edp_drrs_enable(intel_dp,
Maarten Lankhorst138bdac2018-10-11 12:04:48 +02004527 crtc_state);
C, Ramalingam35954e82017-11-08 00:08:23 +05304528 else
4529 intel_edp_drrs_disable(intel_dp,
Maarten Lankhorst138bdac2018-10-11 12:04:48 +02004530 crtc_state);
C, Ramalingam35954e82017-11-08 00:08:23 +05304531 }
Maarten Lankhorst138bdac2018-10-11 12:04:48 +02004532 drm_connector_list_iter_end(&conn_iter);
4533
4534out:
4535 drm_modeset_unlock(&crtc->base.mutex);
4536 if (ret)
4537 return ret;
C, Ramalingam35954e82017-11-08 00:08:23 +05304538 }
C, Ramalingam35954e82017-11-08 00:08:23 +05304539
4540 return 0;
4541}
4542
4543DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4544
Maarten Lankhorstd52ad9c2018-03-28 12:05:26 +02004545static ssize_t
4546i915_fifo_underrun_reset_write(struct file *filp,
4547 const char __user *ubuf,
4548 size_t cnt, loff_t *ppos)
4549{
4550 struct drm_i915_private *dev_priv = filp->private_data;
4551 struct intel_crtc *intel_crtc;
4552 struct drm_device *dev = &dev_priv->drm;
4553 int ret;
4554 bool reset;
4555
4556 ret = kstrtobool_from_user(ubuf, cnt, &reset);
4557 if (ret)
4558 return ret;
4559
4560 if (!reset)
4561 return cnt;
4562
4563 for_each_intel_crtc(dev, intel_crtc) {
4564 struct drm_crtc_commit *commit;
4565 struct intel_crtc_state *crtc_state;
4566
4567 ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
4568 if (ret)
4569 return ret;
4570
4571 crtc_state = to_intel_crtc_state(intel_crtc->base.state);
4572 commit = crtc_state->base.commit;
4573 if (commit) {
4574 ret = wait_for_completion_interruptible(&commit->hw_done);
4575 if (!ret)
4576 ret = wait_for_completion_interruptible(&commit->flip_done);
4577 }
4578
4579 if (!ret && crtc_state->base.active) {
4580 DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
4581 pipe_name(intel_crtc->pipe));
4582
4583 intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
4584 }
4585
4586 drm_modeset_unlock(&intel_crtc->base.mutex);
4587
4588 if (ret)
4589 return ret;
4590 }
4591
4592 ret = intel_fbc_reset_underrun(dev_priv);
4593 if (ret)
4594 return ret;
4595
4596 return cnt;
4597}
4598
4599static const struct file_operations i915_fifo_underrun_reset_ops = {
4600 .owner = THIS_MODULE,
4601 .open = simple_open,
4602 .write = i915_fifo_underrun_reset_write,
4603 .llseek = default_llseek,
4604};
4605
Lespiau, Damien06c5bf82013-10-17 19:09:56 +01004606static const struct drm_info_list i915_debugfs_list[] = {
Chris Wilson311bd682011-01-13 19:06:50 +00004607 {"i915_capabilities", i915_capabilities, 0},
Chris Wilson73aa8082010-09-30 11:46:12 +01004608 {"i915_gem_objects", i915_gem_object_info, 0},
Chris Wilson08c18322011-01-10 00:00:24 +00004609 {"i915_gem_gtt", i915_gem_gtt_info, 0},
Chris Wilson6d2b88852013-08-07 18:30:54 +01004610 {"i915_gem_stolen", i915_gem_stolen_list_info },
Chris Wilsona6172a82009-02-11 14:26:38 +00004611 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
Ben Gamari20172632009-02-17 20:08:50 -05004612 {"i915_gem_interrupt", i915_interrupt_info, 0},
Brad Volkin493018d2014-12-11 12:13:08 -08004613 {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
Dave Gordon8b417c22015-08-12 15:43:44 +01004614 {"i915_guc_info", i915_guc_info, 0},
Alex Daifdf5d352015-08-12 15:43:37 +01004615 {"i915_guc_load_status", i915_guc_load_status_info, 0},
Alex Dai4c7e77f2015-08-12 15:43:40 +01004616 {"i915_guc_log_dump", i915_guc_log_dump, 0},
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07004617 {"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
Oscar Mateoa8b93702017-05-10 15:04:51 +00004618 {"i915_guc_stage_pool", i915_guc_stage_pool, 0},
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08004619 {"i915_huc_load_status", i915_huc_load_status_info, 0},
Deepak Sadb4bd12014-03-31 11:30:02 +05304620 {"i915_frequency_info", i915_frequency_info, 0},
Chris Wilsonf6544492015-01-26 18:03:04 +02004621 {"i915_hangcheck_info", i915_hangcheck_info, 0},
Michel Thierry061d06a2017-06-20 10:57:49 +01004622 {"i915_reset_info", i915_reset_info, 0},
Jesse Barnesf97108d2010-01-29 11:27:07 -08004623 {"i915_drpc_info", i915_drpc_info, 0},
Jesse Barnes7648fa92010-05-20 14:28:11 -07004624 {"i915_emon_status", i915_emon_status, 0},
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07004625 {"i915_ring_freq_table", i915_ring_freq_table, 0},
Daniel Vetter9a851782015-06-18 10:30:22 +02004626 {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
Jesse Barnesb5e50c32010-02-05 12:42:41 -08004627 {"i915_fbc_status", i915_fbc_status, 0},
Paulo Zanoni92d44622013-05-31 16:33:24 -03004628 {"i915_ips_status", i915_ips_status, 0},
Jesse Barnes4a9bef32010-02-05 12:47:35 -08004629 {"i915_sr_status", i915_sr_status, 0},
Chris Wilson44834a62010-08-19 16:09:23 +01004630 {"i915_opregion", i915_opregion, 0},
Jani Nikulaada8f952015-12-15 13:17:12 +02004631 {"i915_vbt", i915_vbt, 0},
Chris Wilson37811fc2010-08-25 22:45:57 +01004632 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
Ben Widawskye76d3632011-03-19 18:14:29 -07004633 {"i915_context_status", i915_context_status, 0},
Mika Kuoppalaf65367b2015-01-16 11:34:42 +02004634 {"i915_forcewake_domains", i915_forcewake_domains, 0},
Daniel Vetterea16a3c2011-12-14 13:57:16 +01004635 {"i915_swizzle_info", i915_swizzle_info, 0},
Ben Widawsky63573eb2013-07-04 11:02:07 -07004636 {"i915_llc", i915_llc, 0},
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03004637 {"i915_edp_psr_status", i915_edp_psr_status, 0},
Jesse Barnesec013e72013-08-20 10:29:23 +01004638 {"i915_energy_uJ", i915_energy_uJ, 0},
Damien Lespiau6455c872015-06-04 18:23:57 +01004639 {"i915_runtime_pm_status", i915_runtime_pm_status, 0},
Imre Deak1da51582013-11-25 17:15:35 +02004640 {"i915_power_domain_info", i915_power_domain_info, 0},
Damien Lespiaub7cec662015-10-27 14:47:01 +02004641 {"i915_dmc_info", i915_dmc_info, 0},
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08004642 {"i915_display_info", i915_display_info, 0},
Chris Wilson1b365952016-10-04 21:11:31 +01004643 {"i915_engine_info", i915_engine_info, 0},
Lionel Landwerlin79e9cd52018-03-06 12:28:54 +00004644 {"i915_rcs_topology", i915_rcs_topology, 0},
Chris Wilsonc5418a82017-10-13 21:26:19 +01004645 {"i915_shrinker_info", i915_shrinker_info, 0},
Daniel Vetter728e29d2014-06-25 22:01:53 +03004646 {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
Dave Airlie11bed952014-05-12 15:22:27 +10004647 {"i915_dp_mst_info", i915_dp_mst_info, 0},
Damien Lespiau1ed1ef92014-08-30 16:50:59 +01004648 {"i915_wa_registers", i915_wa_registers, 0},
Damien Lespiauc5511e42014-11-04 17:06:51 +00004649 {"i915_ddb_info", i915_ddb_info, 0},
Jeff McGee38732182015-02-13 10:27:54 -06004650 {"i915_sseu_status", i915_sseu_status, 0},
Vandana Kannana54746e2015-03-03 20:53:10 +05304651 {"i915_drrs_status", i915_drrs_status, 0},
Chris Wilson1854d5c2015-04-07 16:20:32 +01004652 {"i915_rps_boost_info", i915_rps_boost_info, 0},
Ben Gamari20172632009-02-17 20:08:50 -05004653};
Ben Gamari27c202a2009-07-01 22:26:52 -04004654#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
Ben Gamari20172632009-02-17 20:08:50 -05004655
Lespiau, Damien06c5bf82013-10-17 19:09:56 +01004656static const struct i915_debugfs_files {
Daniel Vetter34b96742013-07-04 20:49:44 +02004657 const char *name;
4658 const struct file_operations *fops;
4659} i915_debugfs_files[] = {
4660 {"i915_wedged", &i915_wedged_fops},
Daniel Vetter34b96742013-07-04 20:49:44 +02004661 {"i915_cache_sharing", &i915_cache_sharing_fops},
Daniel Vetter34b96742013-07-04 20:49:44 +02004662 {"i915_gem_drop_caches", &i915_drop_caches_fops},
Chris Wilson98a2f412016-10-12 10:05:18 +01004663#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
Daniel Vetter34b96742013-07-04 20:49:44 +02004664 {"i915_error_state", &i915_error_state_fops},
Chris Wilson5a4c6f12017-02-14 16:46:11 +00004665 {"i915_gpu_info", &i915_gpu_info_fops},
Chris Wilson98a2f412016-10-12 10:05:18 +01004666#endif
Maarten Lankhorstd52ad9c2018-03-28 12:05:26 +02004667 {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
Ville Syrjälä369a1342014-01-22 14:36:08 +02004668 {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
4669 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
4670 {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
Ville Syrjälä4127dc42017-06-06 15:44:12 +03004671 {"i915_fbc_false_color", &i915_fbc_false_color_fops},
Todd Previteeb3394fa2015-04-18 00:04:19 -07004672 {"i915_dp_test_data", &i915_displayport_test_data_fops},
4673 {"i915_dp_test_type", &i915_displayport_test_type_fops},
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05304674 {"i915_dp_test_active", &i915_displayport_test_active_fops},
Michał Winiarski4977a282018-03-19 10:53:40 +01004675 {"i915_guc_log_level", &i915_guc_log_level_fops},
4676 {"i915_guc_log_relay", &i915_guc_log_relay_fops},
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05304677 {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
Lyude Paul9a64c652018-11-06 16:30:16 -05004678 {"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
C, Ramalingam35954e82017-11-08 00:08:23 +05304679 {"i915_ipc_status", &i915_ipc_status_fops},
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07004680 {"i915_drrs_ctl", &i915_drrs_ctl_fops},
4681 {"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
Daniel Vetter34b96742013-07-04 20:49:44 +02004682};
4683
Chris Wilson1dac8912016-06-24 14:00:17 +01004684int i915_debugfs_register(struct drm_i915_private *dev_priv)
Ben Gamari20172632009-02-17 20:08:50 -05004685{
Chris Wilson91c8a322016-07-05 10:40:23 +01004686 struct drm_minor *minor = dev_priv->drm.primary;
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004687 struct dentry *ent;
Maarten Lankhorst6cc42152018-06-28 09:23:02 +02004688 int i;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004689
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004690 ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4691 minor->debugfs_root, to_i915(minor->dev),
4692 &i915_forcewake_fops);
4693 if (!ent)
4694 return -ENOMEM;
Daniel Vetter6a9c3082011-12-14 13:57:11 +01004695
Daniel Vetter34b96742013-07-04 20:49:44 +02004696 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004697 ent = debugfs_create_file(i915_debugfs_files[i].name,
4698 S_IRUGO | S_IWUSR,
4699 minor->debugfs_root,
4700 to_i915(minor->dev),
Daniel Vetter34b96742013-07-04 20:49:44 +02004701 i915_debugfs_files[i].fops);
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004702 if (!ent)
4703 return -ENOMEM;
Daniel Vetter34b96742013-07-04 20:49:44 +02004704 }
Mika Kuoppala40633212012-12-04 15:12:00 +02004705
Ben Gamari27c202a2009-07-01 22:26:52 -04004706 return drm_debugfs_create_files(i915_debugfs_list,
4707 I915_DEBUGFS_ENTRIES,
Ben Gamari20172632009-02-17 20:08:50 -05004708 minor->debugfs_root, minor);
4709}
4710
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004711struct dpcd_block {
4712 /* DPCD dump start address. */
4713 unsigned int offset;
4714 /* DPCD dump end address, inclusive. If unset, .size will be used. */
4715 unsigned int end;
4716 /* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
4717 size_t size;
4718 /* Only valid for eDP. */
4719 bool edp;
4720};
4721
4722static const struct dpcd_block i915_dpcd_debug[] = {
4723 { .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
4724 { .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
4725 { .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
4726 { .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
4727 { .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
4728 { .offset = DP_SET_POWER },
4729 { .offset = DP_EDP_DPCD_REV },
4730 { .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
4731 { .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
4732 { .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
4733};
4734
4735static int i915_dpcd_show(struct seq_file *m, void *data)
4736{
4737 struct drm_connector *connector = m->private;
4738 struct intel_dp *intel_dp =
4739 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
Jani Nikulae5315212019-01-16 11:15:23 +02004740 u8 buf[16];
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004741 ssize_t err;
4742 int i;
4743
Mika Kuoppala5c1a8872015-05-15 13:09:21 +03004744 if (connector->status != connector_status_connected)
4745 return -ENODEV;
4746
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004747 for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4748 const struct dpcd_block *b = &i915_dpcd_debug[i];
4749 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4750
4751 if (b->edp &&
4752 connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4753 continue;
4754
4755 /* low tech for now */
4756 if (WARN_ON(size > sizeof(buf)))
4757 continue;
4758
4759 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
Chris Wilson65404c82018-10-10 09:17:06 +01004760 if (err < 0)
4761 seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
4762 else
4763 seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
kbuild test robotb3f9d7d2015-04-16 18:34:06 +08004764 }
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004765
4766 return 0;
4767}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02004768DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004769
David Weinehallecbd6782016-08-23 12:23:56 +03004770static int i915_panel_show(struct seq_file *m, void *data)
4771{
4772 struct drm_connector *connector = m->private;
4773 struct intel_dp *intel_dp =
4774 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4775
4776 if (connector->status != connector_status_connected)
4777 return -ENODEV;
4778
4779 seq_printf(m, "Panel power up delay: %d\n",
4780 intel_dp->panel_power_up_delay);
4781 seq_printf(m, "Panel power down delay: %d\n",
4782 intel_dp->panel_power_down_delay);
4783 seq_printf(m, "Backlight on delay: %d\n",
4784 intel_dp->backlight_on_delay);
4785 seq_printf(m, "Backlight off delay: %d\n",
4786 intel_dp->backlight_off_delay);
4787
4788 return 0;
4789}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02004790DEFINE_SHOW_ATTRIBUTE(i915_panel);
David Weinehallecbd6782016-08-23 12:23:56 +03004791
Ramalingam Cbdc93fe2018-10-23 14:52:29 +05304792static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
4793{
4794 struct drm_connector *connector = m->private;
4795 struct intel_connector *intel_connector = to_intel_connector(connector);
4796
4797 if (connector->status != connector_status_connected)
4798 return -ENODEV;
4799
4800 /* HDCP is supported by connector */
Ramalingam Cd3dacc72018-10-29 15:15:46 +05304801 if (!intel_connector->hdcp.shim)
Ramalingam Cbdc93fe2018-10-23 14:52:29 +05304802 return -EINVAL;
4803
4804 seq_printf(m, "%s:%d HDCP version: ", connector->name,
4805 connector->base.id);
4806 seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
4807 "None" : "HDCP1.4");
4808 seq_puts(m, "\n");
4809
4810 return 0;
4811}
4812DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
4813
Manasi Navaree845f092018-12-05 16:54:07 -08004814static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
4815{
4816 struct drm_connector *connector = m->private;
4817 struct drm_device *dev = connector->dev;
4818 struct drm_crtc *crtc;
4819 struct intel_dp *intel_dp;
4820 struct drm_modeset_acquire_ctx ctx;
4821 struct intel_crtc_state *crtc_state = NULL;
4822 int ret = 0;
4823 bool try_again = false;
4824
4825 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
4826
4827 do {
Manasi Navare6afe8922018-12-19 15:51:20 -08004828 try_again = false;
Manasi Navaree845f092018-12-05 16:54:07 -08004829 ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
4830 &ctx);
4831 if (ret) {
4832 ret = -EINTR;
4833 break;
4834 }
4835 crtc = connector->state->crtc;
4836 if (connector->status != connector_status_connected || !crtc) {
4837 ret = -ENODEV;
4838 break;
4839 }
4840 ret = drm_modeset_lock(&crtc->mutex, &ctx);
4841 if (ret == -EDEADLK) {
4842 ret = drm_modeset_backoff(&ctx);
4843 if (!ret) {
4844 try_again = true;
4845 continue;
4846 }
4847 break;
4848 } else if (ret) {
4849 break;
4850 }
4851 intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4852 crtc_state = to_intel_crtc_state(crtc->state);
4853 seq_printf(m, "DSC_Enabled: %s\n",
4854 yesno(crtc_state->dsc_params.compression_enable));
Radhakrishna Sripadafed85692019-01-09 13:14:14 -08004855 seq_printf(m, "DSC_Sink_Support: %s\n",
4856 yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
Manasi Navaree845f092018-12-05 16:54:07 -08004857 if (!intel_dp_is_edp(intel_dp))
4858 seq_printf(m, "FEC_Sink_Support: %s\n",
4859 yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
4860 } while (try_again);
4861
4862 drm_modeset_drop_locks(&ctx);
4863 drm_modeset_acquire_fini(&ctx);
4864
4865 return ret;
4866}
4867
4868static ssize_t i915_dsc_fec_support_write(struct file *file,
4869 const char __user *ubuf,
4870 size_t len, loff_t *offp)
4871{
4872 bool dsc_enable = false;
4873 int ret;
4874 struct drm_connector *connector =
4875 ((struct seq_file *)file->private_data)->private;
4876 struct intel_encoder *encoder = intel_attached_encoder(connector);
4877 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4878
4879 if (len == 0)
4880 return 0;
4881
4882 DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
4883 len);
4884
4885 ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
4886 if (ret < 0)
4887 return ret;
4888
4889 DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
4890 (dsc_enable) ? "true" : "false");
4891 intel_dp->force_dsc_en = dsc_enable;
4892
4893 *offp += len;
4894 return len;
4895}
4896
4897static int i915_dsc_fec_support_open(struct inode *inode,
4898 struct file *file)
4899{
4900 return single_open(file, i915_dsc_fec_support_show,
4901 inode->i_private);
4902}
4903
4904static const struct file_operations i915_dsc_fec_support_fops = {
4905 .owner = THIS_MODULE,
4906 .open = i915_dsc_fec_support_open,
4907 .read = seq_read,
4908 .llseek = seq_lseek,
4909 .release = single_release,
4910 .write = i915_dsc_fec_support_write
4911};
4912
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004913/**
4914 * i915_debugfs_connector_add - add i915 specific connector debugfs files
4915 * @connector: pointer to a registered drm_connector
4916 *
4917 * Cleanup will be done by drm_connector_unregister() through a call to
4918 * drm_debugfs_connector_remove().
4919 *
4920 * Returns 0 on success, negative error codes on error.
4921 */
4922int i915_debugfs_connector_add(struct drm_connector *connector)
4923{
4924 struct dentry *root = connector->debugfs_entry;
Manasi Navaree845f092018-12-05 16:54:07 -08004925 struct drm_i915_private *dev_priv = to_i915(connector->dev);
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004926
4927 /* The connector must have been registered beforehands. */
4928 if (!root)
4929 return -ENODEV;
4930
4931 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4932 connector->connector_type == DRM_MODE_CONNECTOR_eDP)
David Weinehallecbd6782016-08-23 12:23:56 +03004933 debugfs_create_file("i915_dpcd", S_IRUGO, root,
4934 connector, &i915_dpcd_fops);
4935
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07004936 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
David Weinehallecbd6782016-08-23 12:23:56 +03004937 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4938 connector, &i915_panel_fops);
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07004939 debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
4940 connector, &i915_psr_sink_status_fops);
4941 }
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004942
Ramalingam Cbdc93fe2018-10-23 14:52:29 +05304943 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4944 connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
4945 connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
4946 debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
4947 connector, &i915_hdcp_sink_capability_fops);
4948 }
4949
Manasi Navaree845f092018-12-05 16:54:07 -08004950 if (INTEL_GEN(dev_priv) >= 10 &&
4951 (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4952 connector->connector_type == DRM_MODE_CONNECTOR_eDP))
4953 debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
4954 connector, &i915_dsc_fec_support_fops);
4955
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004956 return 0;
4957}