blob: 4c4876967cd63ac287db9c2b7ca44e6c4286e03c [file] [log] [blame]
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */
28
Chris Wilsonf3cd4742009-10-13 22:20:20 +010029#include <linux/debugfs.h>
Chris Wilsone637d2c2017-03-16 13:19:57 +000030#include <linux/sort.h>
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +010031#include <linux/sched/mm.h>
Simon Farnsworth4e5359c2010-09-01 17:47:52 +010032#include "intel_drv.h"
Sagar Arun Kamblea2695742017-11-16 19:02:41 +053033#include "intel_guc_submission.h"
Ben Gamari20172632009-02-17 20:08:50 -050034
Chris Wilson9f588922019-01-16 15:33:04 +000035#include "i915_reset.h"
36
David Weinehall36cdd012016-08-22 13:59:31 +030037static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
38{
39 return to_i915(node->minor->dev);
40}
41
/*
 * debugfs: dump the static device capabilities — generation, platform,
 * PCH type, device-info flags, runtime info, driver caps and the current
 * module parameters.
 */
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

	intel_device_info_dump_flags(info, &p);
	intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
	intel_driver_caps_print(&dev_priv->caps, &p);

	/* Hold the param lock so the modparams cannot change mid-dump. */
	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915_modparams, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}
Ben Gamari433e12f2009-02-17 20:08:51 -050062
/* '*' marks an object the GEM core still tracks as active on the GPU. */
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_is_active(obj))
		return '*';

	return ' ';
}
67
Imre Deaka7363de2016-05-12 16:18:52 +030068static char get_pin_flag(struct drm_i915_gem_object *obj)
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010069{
Chris Wilsonbd3d2252017-10-13 21:26:14 +010070 return obj->pin_global ? 'p' : ' ';
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010071}
72
Imre Deaka7363de2016-05-12 16:18:52 +030073static char get_tiling_flag(struct drm_i915_gem_object *obj)
Chris Wilsona6172a82009-02-11 14:26:38 +000074{
Chris Wilson3e510a82016-08-05 10:14:23 +010075 switch (i915_gem_object_get_tiling(obj)) {
Akshay Joshi0206e352011-08-16 15:34:10 -040076 default:
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010077 case I915_TILING_NONE: return ' ';
78 case I915_TILING_X: return 'X';
79 case I915_TILING_Y: return 'Y';
Akshay Joshi0206e352011-08-16 15:34:10 -040080 }
Chris Wilsona6172a82009-02-11 14:26:38 +000081}
82
Imre Deaka7363de2016-05-12 16:18:52 +030083static char get_global_flag(struct drm_i915_gem_object *obj)
Ben Widawsky1d693bc2013-07-31 17:00:00 -070084{
Chris Wilsona65adaf2017-10-09 09:43:57 +010085 return obj->userfault_count ? 'g' : ' ';
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010086}
87
Imre Deaka7363de2016-05-12 16:18:52 +030088static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010089{
Chris Wilsona4f5ea62016-10-28 13:58:35 +010090 return obj->mm.mapping ? 'M' : ' ';
Ben Widawsky1d693bc2013-07-31 17:00:00 -070091}
92
Tvrtko Ursulinca1543b2015-07-01 11:51:10 +010093static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
94{
95 u64 size = 0;
96 struct i915_vma *vma;
97
Chris Wilsone2189dd2017-12-07 21:14:07 +000098 for_each_ggtt_vma(vma, obj) {
99 if (drm_mm_node_allocated(&vma->node))
Tvrtko Ursulinca1543b2015-07-01 11:51:10 +0100100 size += vma->node.size;
101 }
102
103 return size;
104}
105
Matthew Auld7393b7e2017-10-06 23:18:28 +0100106static const char *
107stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
108{
109 size_t x = 0;
110
111 switch (page_sizes) {
112 case 0:
113 return "";
114 case I915_GTT_PAGE_SIZE_4K:
115 return "4K";
116 case I915_GTT_PAGE_SIZE_64K:
117 return "64K";
118 case I915_GTT_PAGE_SIZE_2M:
119 return "2M";
120 default:
121 if (!buf)
122 return "M";
123
124 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
125 x += snprintf(buf + x, len - x, "2M, ");
126 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
127 x += snprintf(buf + x, len - x, "64K, ");
128 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
129 x += snprintf(buf + x, len - x, "4K, ");
130 buf[x-2] = '\0';
131
132 return buf;
133 }
134}
135
/*
 * Print a one-line (unterminated) description of a GEM object to the
 * seq_file: flags, size, read/write domains, cache level, per-VMA binding
 * details (offset/size/page sizes, GGTT view, fence), stolen placement,
 * last-write engine and frontbuffer bits.
 *
 * Caller must hold struct_mutex (asserted below) so the vma list is stable.
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	/* First pass: count pinned vma without emitting anything. */
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_global)
		seq_printf(m, " (global)");
	/* Second pass: describe each vma that holds an allocated GTT node. */
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_active_request_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
223
Chris Wilsone637d2c2017-03-16 13:19:57 +0000224static int obj_rank_by_stolen(const void *A, const void *B)
Chris Wilson6d2b88852013-08-07 18:30:54 +0100225{
Chris Wilsone637d2c2017-03-16 13:19:57 +0000226 const struct drm_i915_gem_object *a =
227 *(const struct drm_i915_gem_object **)A;
228 const struct drm_i915_gem_object *b =
229 *(const struct drm_i915_gem_object **)B;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100230
Rasmus Villemoes2d05fa12015-09-28 23:08:50 +0200231 if (a->stolen->start < b->stolen->start)
232 return -1;
233 if (a->stolen->start > b->stolen->start)
234 return 1;
235 return 0;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100236}
237
/*
 * debugfs: list all GEM objects backed by stolen memory, sorted by their
 * stolen start offset, followed by object/size totals.
 *
 * The candidate pointers are snapshotted into a kvmalloc'd array under
 * mm.obj_lock (a spinlock, so no printing there), then described under
 * struct_mutex.  @total is a READ_ONCE snapshot, so the walk stops early
 * if more objects appear after the allocation was sized.
 */
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		/* Only bound objects contribute to the GTT total. */
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);

	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, "   ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}
300
/* Accumulator for per-client (or kernel-internal) GEM object statistics. */
struct file_stats {
	struct i915_address_space *vm;	/* ppGTT to filter per-vm stats on */
	unsigned long count;		/* number of objects visited */
	u64 total, unbound;		/* bytes total / with no binding */
	u64 global, shared;		/* bytes in GGTT / exported-or-named */
	u64 active, inactive;		/* bytes in (in)active vma nodes */
	u64 closed;			/* bytes in closed-but-bound vma */
};
309
/*
 * idr_for_each() callback: fold one GEM object into a struct file_stats.
 *
 * GGTT bindings always count towards ->global; ppGTT bindings are only
 * accounted when they belong to stats->vm (note the "continue" skips the
 * active/inactive/closed accounting for foreign vms as well).
 * Always returns 0 so the idr walk continues.
 */
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			if (vma->vm != stats->vm)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;

		if (i915_vma_is_closed(vma))
			stats->closed += vma->node.size;
	}

	return 0;
}
347
/*
 * Emit one summary row for a file_stats accumulation, skipping clients
 * that own no objects.  A macro (not a function) so @stats can be passed
 * by value and @name can be any printable expression.
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound, \
			   stats.closed); \
} while (0)
Brad Volkin493018d2014-12-11 12:13:08 -0800361
/*
 * Accumulate every object in every engine's batch-buffer pool into a
 * single file_stats and print it as the "[k]batch pool" row.
 */
static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	struct file_stats stats = {};
	enum intel_engine_id id;
	int j;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}
382
/*
 * Print per-context GEM usage: kernel-owned context state/ringbuffers are
 * folded into one "[k]contexts" row, while each userspace context gets a
 * row keyed by "<task comm>/<context handle>", accumulated over that
 * client's object handles filtered to the context's ppGTT.
 */
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *i915)
{
	struct file_stats kstats = {};
	struct i915_gem_context *ctx;

	list_for_each_entry(ctx, &i915->contexts.list, link) {
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		for_each_engine(engine, i915, id) {
			struct intel_context *ce = to_intel_context(ctx, engine);

			if (ce->state)
				per_file_stats(0, ce->state->obj, &kstats);
			if (ce->ring)
				per_file_stats(0, ce->ring->vma->obj, &kstats);
		}

		/* Kernel contexts have no file_priv; skip the per-client row. */
		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
			struct file_stats stats = { .vm = &ctx->ppgtt->vm, };
			struct drm_file *file = ctx->file_priv->file;
			struct task_struct *task;
			char name[80];

			spin_lock(&file->table_lock);
			idr_for_each(&file->object_idr, per_file_stats, &stats);
			spin_unlock(&file->table_lock);

			/* pid_task() requires RCU; prefer the context's pid. */
			rcu_read_lock();
			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
			snprintf(name, sizeof(name), "%s/%d",
				 task ? task->comm : "<unknown>",
				 ctx->user_handle);
			rcu_read_unlock();

			print_file_stats(m, name, stats);
		}
	}

	print_file_stats(m, "[k]contexts", kstats);
}
425
/*
 * debugfs: overall GEM object accounting.  Walks the unbound then the
 * bound object lists under mm.obj_lock, tallying totals, purgeable,
 * CPU-mapped, huge-paged and display-pinned objects, then prints GGTT
 * capacity and (under struct_mutex) the batch-pool and per-context stats.
 */
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
	struct drm_i915_gem_object *obj;
	unsigned int page_sizes = 0;
	char buf[80];
	int ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	huge_size = huge_count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	/* Pass 1: objects with no GTT binding. */
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	/* Pass 2: bound objects (purgeable/mapped/huge keep accumulating). */
	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->pin_global) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
		   huge_count,
		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
		   huge_size);
	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%pa] gtt total\n",
		   ggtt->vm.total, &ggtt->mappable_end);
	seq_printf(m, "Supported page sizes: %s\n",
		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
					buf, sizeof(buf)));

	seq_putc(m, '\n');

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	print_batch_pool_stats(m, dev_priv);
	print_context_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
529
Damien Lespiauaee56cf2013-06-24 22:59:49 +0100530static int i915_gem_gtt_info(struct seq_file *m, void *data)
Chris Wilson08c18322011-01-10 00:00:24 +0000531{
Damien Lespiau9f25d002014-05-13 15:30:28 +0100532 struct drm_info_node *node = m->private;
David Weinehall36cdd012016-08-22 13:59:31 +0300533 struct drm_i915_private *dev_priv = node_to_i915(node);
534 struct drm_device *dev = &dev_priv->drm;
Chris Wilsonf2123812017-10-16 12:40:37 +0100535 struct drm_i915_gem_object **objects;
Chris Wilson08c18322011-01-10 00:00:24 +0000536 struct drm_i915_gem_object *obj;
Mika Kuoppalac44ef602015-06-25 18:35:05 +0300537 u64 total_obj_size, total_gtt_size;
Chris Wilsonf2123812017-10-16 12:40:37 +0100538 unsigned long nobject, n;
Chris Wilson08c18322011-01-10 00:00:24 +0000539 int count, ret;
540
Chris Wilsonf2123812017-10-16 12:40:37 +0100541 nobject = READ_ONCE(dev_priv->mm.object_count);
542 objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
543 if (!objects)
544 return -ENOMEM;
545
Chris Wilson08c18322011-01-10 00:00:24 +0000546 ret = mutex_lock_interruptible(&dev->struct_mutex);
547 if (ret)
548 return ret;
549
Chris Wilsonf2123812017-10-16 12:40:37 +0100550 count = 0;
551 spin_lock(&dev_priv->mm.obj_lock);
552 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
553 objects[count++] = obj;
554 if (count == nobject)
555 break;
556 }
557 spin_unlock(&dev_priv->mm.obj_lock);
558
559 total_obj_size = total_gtt_size = 0;
560 for (n = 0; n < count; n++) {
561 obj = objects[n];
562
Damien Lespiau267f0c92013-06-24 22:59:48 +0100563 seq_puts(m, " ");
Chris Wilson08c18322011-01-10 00:00:24 +0000564 describe_obj(m, obj);
Damien Lespiau267f0c92013-06-24 22:59:48 +0100565 seq_putc(m, '\n');
Chris Wilson08c18322011-01-10 00:00:24 +0000566 total_obj_size += obj->base.size;
Tvrtko Ursulinca1543b2015-07-01 11:51:10 +0100567 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
Chris Wilson08c18322011-01-10 00:00:24 +0000568 }
569
570 mutex_unlock(&dev->struct_mutex);
571
Mika Kuoppalac44ef602015-06-25 18:35:05 +0300572 seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
Chris Wilson08c18322011-01-10 00:00:24 +0000573 count, total_obj_size, total_gtt_size);
Chris Wilsonf2123812017-10-16 12:40:37 +0100574 kvfree(objects);
Chris Wilson08c18322011-01-10 00:00:24 +0000575
576 return 0;
577}
578
/*
 * debugfs: for each engine and each batch-pool bucket, print the object
 * count and then describe every pooled object, followed by a grand total.
 * Holds struct_mutex for the whole walk (describe_obj() requires it).
 */
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			/* First walk counts so the header can precede the dump. */
			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, "   ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
623
/*
 * Dump the gen8+ display-engine interrupt registers: per-pipe IMR/IIR/IER
 * (taking a display power reference per pipe, and skipping pipes whose
 * power well is down — reading their registers would fault), then the
 * DE port, DE misc and PCU interrupt registers.
 */
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		wakeref = intel_display_power_get_if_enabled(dev_priv,
							     power_domain);
		if (!wakeref) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain, wakeref);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}
675
/*
 * i915_interrupt_info - debugfs dump of the interrupt register state,
 * branched per platform (CHV, gen11+, gen8+, VLV, pre-PCH-split, ILK+).
 *
 * Holds a runtime-pm wakeref for the whole dump so the device is awake
 * while registers are read.  Always returns 0.
 */
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int i, pipe;

	wakeref = intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t pref;

		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			/*
			 * Read PIPESTAT only if the pipe's power well is
			 * already up; otherwise report it as powered down.
			 */
			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain, pref);
		}

		/* Hotplug/flip registers need the INIT power domain on CHV. */
		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "Master Interrupt Control: %08x\n",
			   I915_READ(GEN11_GFX_MSTR_IRQ));

		seq_printf(m, "Render/Copy Intr Enable: %08x\n",
			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
		seq_printf(m, "VCS/VECS Intr Enable: %08x\n",
			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
		seq_printf(m, "GUC/SG Intr Enable:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
		seq_printf(m, "Crypto Intr Enable:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
		seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));

		seq_printf(m, "Display Interrupt Control:\t%08x\n",
			   I915_READ(GEN11_DISPLAY_INT_CTL));

		/* Display-side registers are shared with the gen8 layout. */
		gen8_display_interrupt_info(m);
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		gen8_display_interrupt_info(m);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;
			intel_wakeref_t pref;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain, pref);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		/* Pre-Ironlake: single IER/IIR/IMR triple plus PIPESTATs. */
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		/* ILK..gen7 with a PCH: north/south display + GT registers. */
		seq_printf(m, "North Display Interrupt enable:		%08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:	%08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:		%08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:		%08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:	%08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:		%08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
			   I915_READ(GTIMR));
	}

	if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "RCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
		seq_printf(m, "BCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_MASK));
		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));

	} else if (INTEL_GEN(dev_priv) >= 6) {
		for_each_engine(engine, dev_priv, id) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):	%08x\n",
				   engine->name, I915_READ_IMR(engine));
		}
	}

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
894
Chris Wilsona6172a82009-02-11 14:26:38 +0000895static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
896{
David Weinehall36cdd012016-08-22 13:59:31 +0300897 struct drm_i915_private *dev_priv = node_to_i915(m->private);
898 struct drm_device *dev = &dev_priv->drm;
Chris Wilsonde227ef2010-07-03 07:58:38 +0100899 int i, ret;
900
901 ret = mutex_lock_interruptible(&dev->struct_mutex);
902 if (ret)
903 return ret;
Chris Wilsona6172a82009-02-11 14:26:38 +0000904
Chris Wilsona6172a82009-02-11 14:26:38 +0000905 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
906 for (i = 0; i < dev_priv->num_fence_regs; i++) {
Chris Wilson49ef5292016-08-18 17:17:00 +0100907 struct i915_vma *vma = dev_priv->fence_regs[i].vma;
Chris Wilsona6172a82009-02-11 14:26:38 +0000908
Chris Wilson6c085a72012-08-20 11:40:46 +0200909 seq_printf(m, "Fence %d, pin count = %d, object = ",
910 i, dev_priv->fence_regs[i].pin_count);
Chris Wilson49ef5292016-08-18 17:17:00 +0100911 if (!vma)
Damien Lespiau267f0c92013-06-24 22:59:48 +0100912 seq_puts(m, "unused");
Chris Wilsonc2c347a92010-10-27 15:11:53 +0100913 else
Chris Wilson49ef5292016-08-18 17:17:00 +0100914 describe_obj(m, vma->obj);
Damien Lespiau267f0c92013-06-24 22:59:48 +0100915 seq_putc(m, '\n');
Chris Wilsona6172a82009-02-11 14:26:38 +0000916 }
917
Chris Wilson05394f32010-11-08 19:18:58 +0000918 mutex_unlock(&dev->struct_mutex);
Chris Wilsona6172a82009-02-11 14:26:38 +0000919 return 0;
920}
921
Chris Wilson98a2f412016-10-12 10:05:18 +0100922#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
/*
 * gpu_state_read - .read handler for the GPU error/info state files.
 *
 * Copies a window [*pos, *pos + count) of the serialised GPU state
 * (file->private_data) to userspace via a kernel bounce buffer.
 * Returns the number of bytes copied, 0 at EOF / when no state is
 * attached, or a negative errno.
 */
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_state *error;
	ssize_t ret;
	void *buf;

	error = file->private_data;
	if (!error)
		return 0;

	/* Bounce buffer required because of kernfs __user API convenience. */
	/*
	 * NOTE(review): `count` is userspace-controlled and passed to
	 * kmalloc() unclamped, so a huge read() request forces a huge
	 * allocation attempt - consider clamping to a sane chunk size.
	 */
	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
	if (ret <= 0)
		goto out;

	/* Only advance the file position by what actually reached userspace. */
	if (!copy_to_user(ubuf, buf, ret))
		*pos += ret;
	else
		ret = -EFAULT;

out:
	kfree(buf);
	return ret;
}
952
953static int gpu_state_release(struct inode *inode, struct file *file)
954{
955 i915_gpu_state_put(file->private_data);
956 return 0;
957}
958
959static int i915_gpu_info_open(struct inode *inode, struct file *file)
960{
Chris Wilson090e5fe2017-03-28 14:14:07 +0100961 struct drm_i915_private *i915 = inode->i_private;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000962 struct i915_gpu_state *gpu;
Chris Wilsona0371212019-01-14 14:21:14 +0000963 intel_wakeref_t wakeref;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000964
Chris Wilsond4225a52019-01-14 14:21:23 +0000965 gpu = NULL;
966 with_intel_runtime_pm(i915, wakeref)
967 gpu = i915_capture_gpu_state(i915);
Chris Wilsone6154e42018-12-07 11:05:54 +0000968 if (IS_ERR(gpu))
969 return PTR_ERR(gpu);
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000970
971 file->private_data = gpu;
972 return 0;
973}
974
/*
 * debugfs "i915_gpu_info": captures a fresh GPU-state snapshot on open,
 * streams it via gpu_state_read() and releases it on close.
 */
static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
Chris Wilson98a2f412016-10-12 10:05:18 +0100982
Daniel Vetterd5442302012-04-27 15:17:40 +0200983static ssize_t
984i915_error_state_write(struct file *filp,
985 const char __user *ubuf,
986 size_t cnt,
987 loff_t *ppos)
988{
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000989 struct i915_gpu_state *error = filp->private_data;
990
991 if (!error)
992 return 0;
Daniel Vetterd5442302012-04-27 15:17:40 +0200993
994 DRM_DEBUG_DRIVER("Resetting error state\n");
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000995 i915_reset_error_state(error->i915);
Daniel Vetterd5442302012-04-27 15:17:40 +0200996
997 return cnt;
998}
999
1000static int i915_error_state_open(struct inode *inode, struct file *file)
1001{
Chris Wilsone6154e42018-12-07 11:05:54 +00001002 struct i915_gpu_state *error;
1003
1004 error = i915_first_error_state(inode->i_private);
1005 if (IS_ERR(error))
1006 return PTR_ERR(error);
1007
1008 file->private_data = error;
Mika Kuoppalaedc3d882013-05-23 13:55:35 +03001009 return 0;
Daniel Vetterd5442302012-04-27 15:17:40 +02001010}
1011
/*
 * debugfs "i915_error_state": exposes the last recorded GPU error state;
 * reads stream it, any write clears it, close drops the reference.
 */
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
Chris Wilson98a2f412016-10-12 10:05:18 +01001020#endif
1021
Deepak Sadb4bd12014-03-31 11:30:02 +05301022static int i915_frequency_info(struct seq_file *m, void *unused)
Jesse Barnesf97108d2010-01-29 11:27:07 -08001023{
David Weinehall36cdd012016-08-22 13:59:31 +03001024 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001025 struct intel_rps *rps = &dev_priv->gt_pm.rps;
Chris Wilsona0371212019-01-14 14:21:14 +00001026 intel_wakeref_t wakeref;
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02001027 int ret = 0;
1028
Chris Wilsona0371212019-01-14 14:21:14 +00001029 wakeref = intel_runtime_pm_get(dev_priv);
Jesse Barnesf97108d2010-01-29 11:27:07 -08001030
Lucas De Marchicf819ef2018-12-12 10:10:43 -08001031 if (IS_GEN(dev_priv, 5)) {
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001032 u16 rgvswctl = I915_READ16(MEMSWCTL);
1033 u16 rgvstat = I915_READ16(MEMSTAT_ILK);
1034
1035 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
1036 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
1037 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
1038 MEMSTAT_VID_SHIFT);
1039 seq_printf(m, "Current P-state: %d\n",
1040 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
David Weinehall36cdd012016-08-22 13:59:31 +03001041 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
Sagar Arun Kamble0d6fc922017-10-10 22:30:02 +01001042 u32 rpmodectl, freq_sts;
Wayne Boyer666a4532015-12-09 12:29:35 -08001043
Sagar Arun Kamble9f817502017-10-10 22:30:05 +01001044 mutex_lock(&dev_priv->pcu_lock);
Sagar Arun Kamble0d6fc922017-10-10 22:30:02 +01001045
1046 rpmodectl = I915_READ(GEN6_RP_CONTROL);
1047 seq_printf(m, "Video Turbo Mode: %s\n",
1048 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1049 seq_printf(m, "HW control enabled: %s\n",
1050 yesno(rpmodectl & GEN6_RP_ENABLE));
1051 seq_printf(m, "SW control enabled: %s\n",
1052 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1053 GEN6_RP_MEDIA_SW_MODE));
1054
Wayne Boyer666a4532015-12-09 12:29:35 -08001055 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
1056 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
1057 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
1058
1059 seq_printf(m, "actual GPU freq: %d MHz\n",
1060 intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
1061
1062 seq_printf(m, "current GPU freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001063 intel_gpu_freq(dev_priv, rps->cur_freq));
Wayne Boyer666a4532015-12-09 12:29:35 -08001064
1065 seq_printf(m, "max GPU freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001066 intel_gpu_freq(dev_priv, rps->max_freq));
Wayne Boyer666a4532015-12-09 12:29:35 -08001067
1068 seq_printf(m, "min GPU freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001069 intel_gpu_freq(dev_priv, rps->min_freq));
Wayne Boyer666a4532015-12-09 12:29:35 -08001070
1071 seq_printf(m, "idle GPU freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001072 intel_gpu_freq(dev_priv, rps->idle_freq));
Wayne Boyer666a4532015-12-09 12:29:35 -08001073
1074 seq_printf(m,
1075 "efficient (RPe) frequency: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001076 intel_gpu_freq(dev_priv, rps->efficient_freq));
Sagar Arun Kamble9f817502017-10-10 22:30:05 +01001077 mutex_unlock(&dev_priv->pcu_lock);
David Weinehall36cdd012016-08-22 13:59:31 +03001078 } else if (INTEL_GEN(dev_priv) >= 6) {
Bob Paauwe35040562015-06-25 14:54:07 -07001079 u32 rp_state_limits;
1080 u32 gt_perf_status;
1081 u32 rp_state_cap;
Chris Wilson0d8f9492014-03-27 09:06:14 +00001082 u32 rpmodectl, rpinclimit, rpdeclimit;
Chris Wilson8e8c06c2013-08-26 19:51:01 -03001083 u32 rpstat, cagf, reqf;
Jesse Barnesccab5c82011-01-18 15:49:25 -08001084 u32 rpupei, rpcurup, rpprevup;
1085 u32 rpdownei, rpcurdown, rpprevdown;
Paulo Zanoni9dd3c602014-08-01 18:14:48 -03001086 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001087 int max_freq;
1088
Bob Paauwe35040562015-06-25 14:54:07 -07001089 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02001090 if (IS_GEN9_LP(dev_priv)) {
Bob Paauwe35040562015-06-25 14:54:07 -07001091 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
1092 gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
1093 } else {
1094 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
1095 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
1096 }
1097
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001098 /* RPSTAT1 is in the GT power well */
Mika Kuoppala59bad942015-01-16 11:34:40 +02001099 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001100
Chris Wilson8e8c06c2013-08-26 19:51:01 -03001101 reqf = I915_READ(GEN6_RPNSWREQ);
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001102 if (INTEL_GEN(dev_priv) >= 9)
Akash Goel60260a52015-03-06 11:07:21 +05301103 reqf >>= 23;
1104 else {
1105 reqf &= ~GEN6_TURBO_DISABLE;
David Weinehall36cdd012016-08-22 13:59:31 +03001106 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
Akash Goel60260a52015-03-06 11:07:21 +05301107 reqf >>= 24;
1108 else
1109 reqf >>= 25;
1110 }
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001111 reqf = intel_gpu_freq(dev_priv, reqf);
Chris Wilson8e8c06c2013-08-26 19:51:01 -03001112
Chris Wilson0d8f9492014-03-27 09:06:14 +00001113 rpmodectl = I915_READ(GEN6_RP_CONTROL);
1114 rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
1115 rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
1116
Jesse Barnesccab5c82011-01-18 15:49:25 -08001117 rpstat = I915_READ(GEN6_RPSTAT1);
Akash Goeld6cda9c2016-04-23 00:05:46 +05301118 rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
1119 rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
1120 rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
1121 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
1122 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
1123 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
Tvrtko Ursulinc84b2702017-11-21 18:18:44 +00001124 cagf = intel_gpu_freq(dev_priv,
1125 intel_get_cagf(dev_priv, rpstat));
Jesse Barnesccab5c82011-01-18 15:49:25 -08001126
Mika Kuoppala59bad942015-01-16 11:34:40 +02001127 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
Ben Widawskyd1ebd8162011-04-25 20:11:50 +01001128
Oscar Mateo6b7a6a72018-05-10 14:59:55 -07001129 if (INTEL_GEN(dev_priv) >= 11) {
1130 pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
1131 pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
1132 /*
1133 * The equivalent to the PM ISR & IIR cannot be read
1134 * without affecting the current state of the system
1135 */
1136 pm_isr = 0;
1137 pm_iir = 0;
1138 } else if (INTEL_GEN(dev_priv) >= 8) {
Paulo Zanoni9dd3c602014-08-01 18:14:48 -03001139 pm_ier = I915_READ(GEN8_GT_IER(2));
1140 pm_imr = I915_READ(GEN8_GT_IMR(2));
1141 pm_isr = I915_READ(GEN8_GT_ISR(2));
1142 pm_iir = I915_READ(GEN8_GT_IIR(2));
Oscar Mateo6b7a6a72018-05-10 14:59:55 -07001143 } else {
1144 pm_ier = I915_READ(GEN6_PMIER);
1145 pm_imr = I915_READ(GEN6_PMIMR);
1146 pm_isr = I915_READ(GEN6_PMISR);
1147 pm_iir = I915_READ(GEN6_PMIIR);
Paulo Zanoni9dd3c602014-08-01 18:14:48 -03001148 }
Oscar Mateo6b7a6a72018-05-10 14:59:55 -07001149 pm_mask = I915_READ(GEN6_PMINTRMSK);
1150
Sagar Arun Kamble960e5462017-10-10 22:29:59 +01001151 seq_printf(m, "Video Turbo Mode: %s\n",
1152 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1153 seq_printf(m, "HW control enabled: %s\n",
1154 yesno(rpmodectl & GEN6_RP_ENABLE));
1155 seq_printf(m, "SW control enabled: %s\n",
1156 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1157 GEN6_RP_MEDIA_SW_MODE));
Oscar Mateo6b7a6a72018-05-10 14:59:55 -07001158
1159 seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
1160 pm_ier, pm_imr, pm_mask);
1161 if (INTEL_GEN(dev_priv) <= 10)
1162 seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
1163 pm_isr, pm_iir);
Sagar Arun Kamble5dd04552017-03-11 08:07:00 +05301164 seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001165 rps->pm_intrmsk_mbz);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001166 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001167 seq_printf(m, "Render p-state ratio: %d\n",
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001168 (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001169 seq_printf(m, "Render p-state VID: %d\n",
1170 gt_perf_status & 0xff);
1171 seq_printf(m, "Render p-state limit: %d\n",
1172 rp_state_limits & 0xff);
Chris Wilson0d8f9492014-03-27 09:06:14 +00001173 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
1174 seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
1175 seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
1176 seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
Chris Wilson8e8c06c2013-08-26 19:51:01 -03001177 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
Ben Widawskyf82855d2013-01-29 12:00:15 -08001178 seq_printf(m, "CAGF: %dMHz\n", cagf);
Akash Goeld6cda9c2016-04-23 00:05:46 +05301179 seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
1180 rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
1181 seq_printf(m, "RP CUR UP: %d (%dus)\n",
1182 rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
1183 seq_printf(m, "RP PREV UP: %d (%dus)\n",
1184 rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
Chris Wilson60548c52018-07-31 14:26:29 +01001185 seq_printf(m, "Up threshold: %d%%\n",
1186 rps->power.up_threshold);
Chris Wilsond86ed342015-04-27 13:41:19 +01001187
Akash Goeld6cda9c2016-04-23 00:05:46 +05301188 seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
1189 rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
1190 seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
1191 rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
1192 seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
1193 rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
Chris Wilson60548c52018-07-31 14:26:29 +01001194 seq_printf(m, "Down threshold: %d%%\n",
1195 rps->power.down_threshold);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001196
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02001197 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
Bob Paauwe35040562015-06-25 14:54:07 -07001198 rp_state_cap >> 16) & 0xff;
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001199 max_freq *= (IS_GEN9_BC(dev_priv) ||
Oscar Mateo2b2874e2018-04-05 17:00:52 +03001200 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001201 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001202 intel_gpu_freq(dev_priv, max_freq));
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001203
1204 max_freq = (rp_state_cap & 0xff00) >> 8;
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001205 max_freq *= (IS_GEN9_BC(dev_priv) ||
Oscar Mateo2b2874e2018-04-05 17:00:52 +03001206 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001207 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001208 intel_gpu_freq(dev_priv, max_freq));
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001209
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02001210 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
Bob Paauwe35040562015-06-25 14:54:07 -07001211 rp_state_cap >> 0) & 0xff;
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001212 max_freq *= (IS_GEN9_BC(dev_priv) ||
Oscar Mateo2b2874e2018-04-05 17:00:52 +03001213 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001214 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001215 intel_gpu_freq(dev_priv, max_freq));
Ben Widawsky31c77382013-04-05 14:29:22 -07001216 seq_printf(m, "Max overclocked frequency: %dMHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001217 intel_gpu_freq(dev_priv, rps->max_freq));
Chris Wilsonaed242f2015-03-18 09:48:21 +00001218
Chris Wilsond86ed342015-04-27 13:41:19 +01001219 seq_printf(m, "Current freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001220 intel_gpu_freq(dev_priv, rps->cur_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001221 seq_printf(m, "Actual freq: %d MHz\n", cagf);
Chris Wilsonaed242f2015-03-18 09:48:21 +00001222 seq_printf(m, "Idle freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001223 intel_gpu_freq(dev_priv, rps->idle_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001224 seq_printf(m, "Min freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001225 intel_gpu_freq(dev_priv, rps->min_freq));
Chris Wilson29ecd78d2016-07-13 09:10:35 +01001226 seq_printf(m, "Boost freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001227 intel_gpu_freq(dev_priv, rps->boost_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001228 seq_printf(m, "Max freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001229 intel_gpu_freq(dev_priv, rps->max_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001230 seq_printf(m,
1231 "efficient (RPe) frequency: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001232 intel_gpu_freq(dev_priv, rps->efficient_freq));
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001233 } else {
Damien Lespiau267f0c92013-06-24 22:59:48 +01001234 seq_puts(m, "no P-state info available\n");
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001235 }
Jesse Barnesf97108d2010-01-29 11:27:07 -08001236
Ville Syrjälä49cd97a2017-02-07 20:33:45 +02001237 seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
Mika Kahola1170f282015-09-25 14:00:32 +03001238 seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
1239 seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
1240
Chris Wilsona0371212019-01-14 14:21:14 +00001241 intel_runtime_pm_put(dev_priv, wakeref);
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02001242 return ret;
Jesse Barnesf97108d2010-01-29 11:27:07 -08001243}
1244
Ben Widawskyd6369512016-09-20 16:54:32 +03001245static void i915_instdone_info(struct drm_i915_private *dev_priv,
1246 struct seq_file *m,
1247 struct intel_instdone *instdone)
1248{
Ben Widawskyf9e61372016-09-20 16:54:33 +03001249 int slice;
1250 int subslice;
1251
Ben Widawskyd6369512016-09-20 16:54:32 +03001252 seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1253 instdone->instdone);
1254
1255 if (INTEL_GEN(dev_priv) <= 3)
1256 return;
1257
1258 seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1259 instdone->slice_common);
1260
1261 if (INTEL_GEN(dev_priv) <= 6)
1262 return;
1263
Ben Widawskyf9e61372016-09-20 16:54:33 +03001264 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1265 seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1266 slice, subslice, instdone->sampler[slice][subslice]);
1267
1268 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1269 seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1270 slice, subslice, instdone->row[slice][subslice]);
Ben Widawskyd6369512016-09-20 16:54:32 +03001271}
1272
/*
 * debugfs show: dump hangcheck state -- global reset flags, hangcheck
 * timer status, and per-engine seqno/ACTHD snapshots (plus INSTDONE for
 * the render engine) so a stuck GPU can be diagnosed from userspace.
 */
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];	/* active head per engine */
	u32 seqno[I915_NUM_ENGINES];	/* current breadcrumb per engine */
	struct intel_instdone instdone;
	intel_wakeref_t wakeref;
	enum intel_engine_id id;

	/* Global error state first: wedged / reset-in-progress bits. */
	seq_printf(m, "Reset flags: %lx\n", dev_priv->gpu_error.flags);
	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "\tWedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "\tDevice (global) reset in progress\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	/*
	 * Sample HW state under a runtime-pm wakeref so the registers are
	 * powered while we read them; the snapshot is printed afterwards.
	 */
	with_intel_runtime_pm(dev_priv, wakeref) {
		for_each_engine(engine, dev_priv, id) {
			acthd[id] = intel_engine_get_active_head(engine);
			seqno[id] = intel_engine_get_seqno(engine);
		}

		intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
	}

	/* Report whether the hangcheck worker is queued, pending or idle. */
	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	/* Compare each engine's hangcheck snapshot with the live values. */
	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x], %dms ago\n",
			   engine->hangcheck.seqno, seqno[id],
			   intel_engine_last_submit(engine),
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);

		/* INSTDONE is only meaningful for the render engine. */
		if (engine->id == RCS) {
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}
1340
Michel Thierry061d06a2017-06-20 10:57:49 +01001341static int i915_reset_info(struct seq_file *m, void *unused)
1342{
1343 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1344 struct i915_gpu_error *error = &dev_priv->gpu_error;
1345 struct intel_engine_cs *engine;
1346 enum intel_engine_id id;
1347
1348 seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1349
1350 for_each_engine(engine, dev_priv, id) {
1351 seq_printf(m, "%s = %u\n", engine->name,
1352 i915_reset_engine_count(error, engine));
1353 }
1354
1355 return 0;
1356}
1357
/*
 * Print DRPC (render power-saving) state for Ironlake-class hardware by
 * decoding the MEMMODECTL, RSTDBYCTL and CRSTANDVID registers.
 * Called from i915_drpc_info() for pre-gen6 devices.
 */
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	/* Decode the memory self-refresh / frequency control register. */
	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	/* FMIN occupies the low bits, so no shift is needed. */
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	/* Voltage IDs for the RS1/RS2 standby states (6-bit fields). */
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	/* Translate the render-standby status field into a human name. */
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}
1414
Mika Kuoppalaf65367b2015-01-16 11:34:42 +02001415static int i915_forcewake_domains(struct seq_file *m, void *data)
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001416{
Chris Wilson233ebf52017-03-23 10:19:44 +00001417 struct drm_i915_private *i915 = node_to_i915(m->private);
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001418 struct intel_uncore_forcewake_domain *fw_domain;
Chris Wilsond2dc94b2017-03-23 10:19:41 +00001419 unsigned int tmp;
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001420
Chris Wilsond7a133d2017-09-07 14:44:41 +01001421 seq_printf(m, "user.bypass_count = %u\n",
1422 i915->uncore.user_forcewake.count);
1423
Chris Wilson233ebf52017-03-23 10:19:44 +00001424 for_each_fw_domain(fw_domain, i915, tmp)
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001425 seq_printf(m, "%s.wake_count = %u\n",
Tvrtko Ursulin33c582c2016-04-07 17:04:33 +01001426 intel_uncore_forcewake_domain_to_str(fw_domain->id),
Chris Wilson233ebf52017-03-23 10:19:44 +00001427 READ_ONCE(fw_domain->wake_count));
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001428
1429 return 0;
1430}
1431
Mika Kuoppala13628772017-03-15 17:43:02 +02001432static void print_rc6_res(struct seq_file *m,
1433 const char *title,
1434 const i915_reg_t reg)
1435{
1436 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1437
1438 seq_printf(m, "%s %u (%llu us)\n",
1439 title, I915_READ(reg),
1440 intel_rc6_residency_us(dev_priv, reg));
1441}
1442
Deepak S669ab5a2014-01-10 15:18:26 +05301443static int vlv_drpc_info(struct seq_file *m)
1444{
David Weinehall36cdd012016-08-22 13:59:31 +03001445 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Sagar Arun Kamble0d6fc922017-10-10 22:30:02 +01001446 u32 rcctl1, pw_status;
Deepak S669ab5a2014-01-10 15:18:26 +05301447
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001448 pw_status = I915_READ(VLV_GTLC_PW_STATUS);
Deepak S669ab5a2014-01-10 15:18:26 +05301449 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1450
Deepak S669ab5a2014-01-10 15:18:26 +05301451 seq_printf(m, "RC6 Enabled: %s\n",
1452 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1453 GEN6_RC_CTL_EI_MODE(1))));
1454 seq_printf(m, "Render Power Well: %s\n",
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001455 (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
Deepak S669ab5a2014-01-10 15:18:26 +05301456 seq_printf(m, "Media Power Well: %s\n",
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001457 (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
Deepak S669ab5a2014-01-10 15:18:26 +05301458
Mika Kuoppala13628772017-03-15 17:43:02 +02001459 print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1460 print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
Imre Deak9cc19be2014-04-14 20:24:24 +03001461
Mika Kuoppalaf65367b2015-01-16 11:34:42 +02001462 return i915_forcewake_domains(m, NULL);
Deepak S669ab5a2014-01-10 15:18:26 +05301463}
1464
/*
 * Print DRPC (RC6 power-saving) state for gen6+ hardware: RC state
 * machine status, power-well gating (gen9+), residency counters and the
 * RC6 voltage IDs read from the PCU (gen6/7 only).  Tail-calls
 * i915_forcewake_domains() to append the forcewake summary.
 */
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	/* _FW read bypasses forcewake accounting; trace it by hand. */
	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	/* RC6 VIDs come from the PCU mailbox, serialised by pcu_lock. */
	if (INTEL_GEN(dev_priv) <= 7) {
		mutex_lock(&dev_priv->pcu_lock);
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids);
		mutex_unlock(&dev_priv->pcu_lock);
	}

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	/* Decode the current RC state from the GT core status register. */
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	/* Decode the 8-bit VID fields fetched from the PCU above. */
	if (INTEL_GEN(dev_priv) <= 7) {
		seq_printf(m, "RC6 voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	return i915_forcewake_domains(m, NULL);
}
1552
1553static int i915_drpc_info(struct seq_file *m, void *unused)
1554{
David Weinehall36cdd012016-08-22 13:59:31 +03001555 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00001556 intel_wakeref_t wakeref;
Chris Wilsond4225a52019-01-14 14:21:23 +00001557 int err = -ENODEV;
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001558
Chris Wilsond4225a52019-01-14 14:21:23 +00001559 with_intel_runtime_pm(dev_priv, wakeref) {
1560 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1561 err = vlv_drpc_info(m);
1562 else if (INTEL_GEN(dev_priv) >= 6)
1563 err = gen6_drpc_info(m);
1564 else
1565 err = ironlake_drpc_info(m);
1566 }
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001567
1568 return err;
Ben Widawsky4d855292011-12-12 19:34:16 -08001569}
1570
Daniel Vetter9a851782015-06-18 10:30:22 +02001571static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1572{
David Weinehall36cdd012016-08-22 13:59:31 +03001573 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Daniel Vetter9a851782015-06-18 10:30:22 +02001574
1575 seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1576 dev_priv->fb_tracking.busy_bits);
1577
1578 seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1579 dev_priv->fb_tracking.flip_bits);
1580
1581 return 0;
1582}
1583
/*
 * debugfs show: report whether FBC (framebuffer compression) is active,
 * the reason it is disabled if not, and -- when active -- whether the
 * hardware is currently compressing, decoded per-generation.
 */
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_fbc *fbc = &dev_priv->fbc;
	intel_wakeref_t wakeref;

	if (!HAS_FBC(dev_priv))
		return -ENODEV;

	/* Hold a wakeref for the register reads; fbc->lock guards state. */
	wakeref = intel_runtime_pm_get(dev_priv);
	mutex_lock(&fbc->lock);

	if (intel_fbc_is_active(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);

	if (intel_fbc_is_active(dev_priv)) {
		u32 mask;

		/* The compression-status register/mask varies by gen. */
		if (INTEL_GEN(dev_priv) >= 8)
			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 7)
			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 5)
			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
		else if (IS_G4X(dev_priv))
			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
		else
			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
							FBC_STAT_COMPRESSED);

		seq_printf(m, "Compressing: %s\n", yesno(mask));
	}

	mutex_unlock(&fbc->lock);
	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
1624
Ville Syrjälä4127dc42017-06-06 15:44:12 +03001625static int i915_fbc_false_color_get(void *data, u64 *val)
Rodrigo Vivida46f932014-08-01 02:04:45 -07001626{
David Weinehall36cdd012016-08-22 13:59:31 +03001627 struct drm_i915_private *dev_priv = data;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001628
David Weinehall36cdd012016-08-22 13:59:31 +03001629 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
Rodrigo Vivida46f932014-08-01 02:04:45 -07001630 return -ENODEV;
1631
Rodrigo Vivida46f932014-08-01 02:04:45 -07001632 *val = dev_priv->fbc.false_color;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001633
1634 return 0;
1635}
1636
Ville Syrjälä4127dc42017-06-06 15:44:12 +03001637static int i915_fbc_false_color_set(void *data, u64 val)
Rodrigo Vivida46f932014-08-01 02:04:45 -07001638{
David Weinehall36cdd012016-08-22 13:59:31 +03001639 struct drm_i915_private *dev_priv = data;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001640 u32 reg;
1641
David Weinehall36cdd012016-08-22 13:59:31 +03001642 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
Rodrigo Vivida46f932014-08-01 02:04:45 -07001643 return -ENODEV;
1644
Paulo Zanoni25ad93f2015-07-02 19:25:10 -03001645 mutex_lock(&dev_priv->fbc.lock);
Rodrigo Vivida46f932014-08-01 02:04:45 -07001646
1647 reg = I915_READ(ILK_DPFC_CONTROL);
1648 dev_priv->fbc.false_color = val;
1649
1650 I915_WRITE(ILK_DPFC_CONTROL, val ?
1651 (reg | FBC_CTL_FALSE_COLOR) :
1652 (reg & ~FBC_CTL_FALSE_COLOR));
1653
Paulo Zanoni25ad93f2015-07-02 19:25:10 -03001654 mutex_unlock(&dev_priv->fbc.lock);
Rodrigo Vivida46f932014-08-01 02:04:45 -07001655 return 0;
1656}
1657
/* debugfs file ops binding the false-color get/set pair above. */
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");
1661
Paulo Zanoni92d44622013-05-31 16:33:24 -03001662static int i915_ips_status(struct seq_file *m, void *unused)
1663{
David Weinehall36cdd012016-08-22 13:59:31 +03001664 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00001665 intel_wakeref_t wakeref;
Paulo Zanoni92d44622013-05-31 16:33:24 -03001666
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00001667 if (!HAS_IPS(dev_priv))
1668 return -ENODEV;
Paulo Zanoni92d44622013-05-31 16:33:24 -03001669
Chris Wilsona0371212019-01-14 14:21:14 +00001670 wakeref = intel_runtime_pm_get(dev_priv);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001671
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001672 seq_printf(m, "Enabled by kernel parameter: %s\n",
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00001673 yesno(i915_modparams.enable_ips));
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001674
David Weinehall36cdd012016-08-22 13:59:31 +03001675 if (INTEL_GEN(dev_priv) >= 8) {
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001676 seq_puts(m, "Currently: unknown\n");
1677 } else {
1678 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1679 seq_puts(m, "Currently: enabled\n");
1680 else
1681 seq_puts(m, "Currently: disabled\n");
1682 }
Paulo Zanoni92d44622013-05-31 16:33:24 -03001683
Chris Wilsona0371212019-01-14 14:21:14 +00001684 intel_runtime_pm_put(dev_priv, wakeref);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001685
Paulo Zanoni92d44622013-05-31 16:33:24 -03001686 return 0;
1687}
1688
/*
 * debugfs show: report whether self-refresh is enabled, probing the
 * platform-specific enable bit.  On gen9+ there is no single global bit
 * so nothing is probed and "disabled" is reported.
 */
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	bool sr_enabled = false;

	/* Display power is needed for the register reads below. */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
1717
Jesse Barnes7648fa92010-05-20 14:28:11 -07001718static int i915_emon_status(struct seq_file *m, void *unused)
1719{
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001720 struct drm_i915_private *i915 = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00001721 intel_wakeref_t wakeref;
Chris Wilsonde227ef2010-07-03 07:58:38 +01001722
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001723 if (!IS_GEN(i915, 5))
Chris Wilson582be6b2012-04-30 19:35:02 +01001724 return -ENODEV;
1725
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001726 with_intel_runtime_pm(i915, wakeref) {
1727 unsigned long temp, chipset, gfx;
Jesse Barnes7648fa92010-05-20 14:28:11 -07001728
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001729 temp = i915_mch_val(i915);
1730 chipset = i915_chipset_val(i915);
1731 gfx = i915_gfx_val(i915);
Chris Wilsona0371212019-01-14 14:21:14 +00001732
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001733 seq_printf(m, "GMCH temp: %ld\n", temp);
1734 seq_printf(m, "Chipset power: %ld\n", chipset);
1735 seq_printf(m, "GFX power: %ld\n", gfx);
1736 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1737 }
Jesse Barnes7648fa92010-05-20 14:28:11 -07001738
1739 return 0;
1740}
1741
/*
 * debugfs show: print the GPU-frequency to effective CPU/ring frequency
 * table by querying the PCU for each GPU frequency step.  Only valid on
 * parts with an LLC (where the ring/IA frequencies are coupled).
 */
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	intel_wakeref_t wakeref;
	int gpu_freq, ia_freq;
	int ret;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(dev_priv);

	/* PCU mailbox traffic below must be serialised by pcu_lock. */
	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		goto out;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		/* PCU returns IA freq (bits 0-7) and ring freq (bits 8-15),
		 * both in units of 100MHz. */
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ||
						      INTEL_GEN(dev_priv) >= 10 ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	intel_runtime_pm_put(dev_priv, wakeref);
	return ret;
}
1790
Chris Wilson44834a62010-08-19 16:09:23 +01001791static int i915_opregion(struct seq_file *m, void *unused)
1792{
David Weinehall36cdd012016-08-22 13:59:31 +03001793 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1794 struct drm_device *dev = &dev_priv->drm;
Chris Wilson44834a62010-08-19 16:09:23 +01001795 struct intel_opregion *opregion = &dev_priv->opregion;
1796 int ret;
1797
1798 ret = mutex_lock_interruptible(&dev->struct_mutex);
1799 if (ret)
Daniel Vetter0d38f002012-04-21 22:49:10 +02001800 goto out;
Chris Wilson44834a62010-08-19 16:09:23 +01001801
Jani Nikula2455a8e2015-12-14 12:50:53 +02001802 if (opregion->header)
1803 seq_write(m, opregion->header, OPREGION_SIZE);
Chris Wilson44834a62010-08-19 16:09:23 +01001804
1805 mutex_unlock(&dev->struct_mutex);
1806
Daniel Vetter0d38f002012-04-21 22:49:10 +02001807out:
Chris Wilson44834a62010-08-19 16:09:23 +01001808 return 0;
1809}
1810
Jani Nikulaada8f952015-12-15 13:17:12 +02001811static int i915_vbt(struct seq_file *m, void *unused)
1812{
David Weinehall36cdd012016-08-22 13:59:31 +03001813 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
Jani Nikulaada8f952015-12-15 13:17:12 +02001814
1815 if (opregion->vbt)
1816 seq_write(m, opregion->vbt, opregion->vbt_size);
1817
1818 return 0;
1819}
1820
/*
 * debugfs show: describe every framebuffer known to the device -- the
 * fbdev framebuffer first (when fbdev emulation is built in), then all
 * userspace framebuffers from the mode_config list, skipping the fbdev
 * one so it is not printed twice.
 */
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	/* describe_obj() below requires struct_mutex. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
		seq_putc(m, '\n');
	}
#endif

	/* The framebuffer list itself is guarded by fb_lock. */
	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
		/* Already printed above as the fbcon framebuffer. */
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, intel_fb_obj(&fb->base));
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1870
Chris Wilson7e37f882016-08-02 22:50:21 +01001871static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001872{
Chris Wilsonef5032a2018-03-07 13:42:24 +00001873 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1874 ring->space, ring->head, ring->tail, ring->emit);
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001875}
1876
Ben Widawskye76d3632011-03-19 18:14:29 -07001877static int i915_context_status(struct seq_file *m, void *unused)
1878{
David Weinehall36cdd012016-08-22 13:59:31 +03001879 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1880 struct drm_device *dev = &dev_priv->drm;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001881 struct intel_engine_cs *engine;
Chris Wilsone2efd132016-05-24 14:53:34 +01001882 struct i915_gem_context *ctx;
Akash Goel3b3f1652016-10-13 22:44:48 +05301883 enum intel_engine_id id;
Dave Gordonc3232b12016-03-23 18:19:53 +00001884 int ret;
Ben Widawskye76d3632011-03-19 18:14:29 -07001885
Daniel Vetterf3d28872014-05-29 23:23:08 +02001886 ret = mutex_lock_interruptible(&dev->struct_mutex);
Ben Widawskye76d3632011-03-19 18:14:29 -07001887 if (ret)
1888 return ret;
1889
Chris Wilson829a0af2017-06-20 12:05:45 +01001890 list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
Chris Wilson288f1ce2018-09-04 16:31:17 +01001891 seq_puts(m, "HW context ");
1892 if (!list_empty(&ctx->hw_id_link))
1893 seq_printf(m, "%x [pin %u]", ctx->hw_id,
1894 atomic_read(&ctx->hw_id_pin_count));
Chris Wilsonc84455b2016-08-15 10:49:08 +01001895 if (ctx->pid) {
Chris Wilsond28b99a2016-05-24 14:53:39 +01001896 struct task_struct *task;
1897
Chris Wilsonc84455b2016-08-15 10:49:08 +01001898 task = get_pid_task(ctx->pid, PIDTYPE_PID);
Chris Wilsond28b99a2016-05-24 14:53:39 +01001899 if (task) {
1900 seq_printf(m, "(%s [%d]) ",
1901 task->comm, task->pid);
1902 put_task_struct(task);
1903 }
Chris Wilsonc84455b2016-08-15 10:49:08 +01001904 } else if (IS_ERR(ctx->file_priv)) {
1905 seq_puts(m, "(deleted) ");
Chris Wilsond28b99a2016-05-24 14:53:39 +01001906 } else {
1907 seq_puts(m, "(kernel) ");
1908 }
1909
Chris Wilsonbca44d82016-05-24 14:53:41 +01001910 seq_putc(m, ctx->remap_slice ? 'R' : 'r');
1911 seq_putc(m, '\n');
Ben Widawskya33afea2013-09-17 21:12:45 -07001912
Akash Goel3b3f1652016-10-13 22:44:48 +05301913 for_each_engine(engine, dev_priv, id) {
Chris Wilsonab82a062018-04-30 14:15:01 +01001914 struct intel_context *ce =
1915 to_intel_context(ctx, engine);
Chris Wilsonbca44d82016-05-24 14:53:41 +01001916
1917 seq_printf(m, "%s: ", engine->name);
Chris Wilsonbca44d82016-05-24 14:53:41 +01001918 if (ce->state)
Chris Wilsonbf3783e2016-08-15 10:48:54 +01001919 describe_obj(m, ce->state->obj);
Chris Wilsondca33ec2016-08-02 22:50:20 +01001920 if (ce->ring)
Chris Wilson7e37f882016-08-02 22:50:21 +01001921 describe_ctx_ring(m, ce->ring);
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001922 seq_putc(m, '\n');
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001923 }
1924
Ben Widawskya33afea2013-09-17 21:12:45 -07001925 seq_putc(m, '\n');
Ben Widawskya168c292013-02-14 15:05:12 -08001926 }
1927
Daniel Vetterf3d28872014-05-29 23:23:08 +02001928 mutex_unlock(&dev->struct_mutex);
Ben Widawskye76d3632011-03-19 18:14:29 -07001929
1930 return 0;
1931}
1932
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001933static const char *swizzle_string(unsigned swizzle)
1934{
Damien Lespiauaee56cf2013-06-24 22:59:49 +01001935 switch (swizzle) {
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001936 case I915_BIT_6_SWIZZLE_NONE:
1937 return "none";
1938 case I915_BIT_6_SWIZZLE_9:
1939 return "bit9";
1940 case I915_BIT_6_SWIZZLE_9_10:
1941 return "bit9/bit10";
1942 case I915_BIT_6_SWIZZLE_9_11:
1943 return "bit9/bit11";
1944 case I915_BIT_6_SWIZZLE_9_10_11:
1945 return "bit9/bit10/bit11";
1946 case I915_BIT_6_SWIZZLE_9_17:
1947 return "bit9/bit17";
1948 case I915_BIT_6_SWIZZLE_9_10_17:
1949 return "bit9/bit10/bit17";
1950 case I915_BIT_6_SWIZZLE_UNKNOWN:
Masanari Iida8a168ca2012-12-29 02:00:09 +09001951 return "unknown";
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001952 }
1953
1954 return "bug";
1955}
1956
/*
 * i915_swizzle_info - report the detected bit-6 swizzling configuration.
 *
 * Prints the X/Y tiling swizzle modes cached in dev_priv->mm, then the
 * raw memory-controller registers they were derived from: DCC/DRB on
 * gen3/4, MAD_DIMM/TILECTL/ARB on gen6+.  A runtime-PM wakeref is held
 * across the register reads.
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	/* gen3/4: DRAM controller and DRB registers. */
	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		/* gen6+: per-channel DIMM config plus tiling/arbiter control. */
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		/* The arbiter mode register moved on gen8. */
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
2004
Chris Wilson7466c292016-08-15 09:49:33 +01002005static const char *rps_power_to_str(unsigned int power)
2006{
2007 static const char * const strings[] = {
2008 [LOW_POWER] = "low power",
2009 [BETWEEN] = "mixed",
2010 [HIGH_POWER] = "high power",
2011 };
2012
2013 if (power >= ARRAY_SIZE(strings) || !strings[power])
2014 return "unknown";
2015
2016 return strings[power];
2017}
2018
Chris Wilson1854d5c2015-04-07 16:20:32 +01002019static int i915_rps_boost_info(struct seq_file *m, void *data)
2020{
David Weinehall36cdd012016-08-22 13:59:31 +03002021 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2022 struct drm_device *dev = &dev_priv->drm;
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002023 struct intel_rps *rps = &dev_priv->gt_pm.rps;
Chris Wilsonc0a6aa72018-10-02 12:32:21 +01002024 u32 act_freq = rps->cur_freq;
Chris Wilsona0371212019-01-14 14:21:14 +00002025 intel_wakeref_t wakeref;
Chris Wilson1854d5c2015-04-07 16:20:32 +01002026 struct drm_file *file;
Chris Wilson1854d5c2015-04-07 16:20:32 +01002027
Chris Wilsond4225a52019-01-14 14:21:23 +00002028 with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
Chris Wilsonc0a6aa72018-10-02 12:32:21 +01002029 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
2030 mutex_lock(&dev_priv->pcu_lock);
2031 act_freq = vlv_punit_read(dev_priv,
2032 PUNIT_REG_GPU_FREQ_STS);
2033 act_freq = (act_freq >> 8) & 0xff;
2034 mutex_unlock(&dev_priv->pcu_lock);
2035 } else {
2036 act_freq = intel_get_cagf(dev_priv,
2037 I915_READ(GEN6_RPSTAT1));
2038 }
Chris Wilsonc0a6aa72018-10-02 12:32:21 +01002039 }
2040
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002041 seq_printf(m, "RPS enabled? %d\n", rps->enabled);
Chris Wilson28176ef2016-10-28 13:58:56 +01002042 seq_printf(m, "GPU busy? %s [%d requests]\n",
2043 yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
Chris Wilson7b92c1b2017-06-28 13:35:48 +01002044 seq_printf(m, "Boosts outstanding? %d\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002045 atomic_read(&rps->num_waiters));
Chris Wilson60548c52018-07-31 14:26:29 +01002046 seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
Chris Wilsonc0a6aa72018-10-02 12:32:21 +01002047 seq_printf(m, "Frequency requested %d, actual %d\n",
2048 intel_gpu_freq(dev_priv, rps->cur_freq),
2049 intel_gpu_freq(dev_priv, act_freq));
Chris Wilson7466c292016-08-15 09:49:33 +01002050 seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002051 intel_gpu_freq(dev_priv, rps->min_freq),
2052 intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
2053 intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
2054 intel_gpu_freq(dev_priv, rps->max_freq));
Chris Wilson7466c292016-08-15 09:49:33 +01002055 seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002056 intel_gpu_freq(dev_priv, rps->idle_freq),
2057 intel_gpu_freq(dev_priv, rps->efficient_freq),
2058 intel_gpu_freq(dev_priv, rps->boost_freq));
Daniel Vetter1d2ac402016-04-26 19:29:41 +02002059
2060 mutex_lock(&dev->filelist_mutex);
Chris Wilson1854d5c2015-04-07 16:20:32 +01002061 list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2062 struct drm_i915_file_private *file_priv = file->driver_priv;
2063 struct task_struct *task;
2064
2065 rcu_read_lock();
2066 task = pid_task(file->pid, PIDTYPE_PID);
Chris Wilson7b92c1b2017-06-28 13:35:48 +01002067 seq_printf(m, "%s [%d]: %d boosts\n",
Chris Wilson1854d5c2015-04-07 16:20:32 +01002068 task ? task->comm : "<unknown>",
2069 task ? task->pid : -1,
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002070 atomic_read(&file_priv->rps_client.boosts));
Chris Wilson1854d5c2015-04-07 16:20:32 +01002071 rcu_read_unlock();
2072 }
Chris Wilson7b92c1b2017-06-28 13:35:48 +01002073 seq_printf(m, "Kernel (anonymous) boosts: %d\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002074 atomic_read(&rps->boosts));
Daniel Vetter1d2ac402016-04-26 19:29:41 +02002075 mutex_unlock(&dev->filelist_mutex);
Chris Wilson1854d5c2015-04-07 16:20:32 +01002076
Chris Wilson7466c292016-08-15 09:49:33 +01002077 if (INTEL_GEN(dev_priv) >= 6 &&
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002078 rps->enabled &&
Chris Wilson28176ef2016-10-28 13:58:56 +01002079 dev_priv->gt.active_requests) {
Chris Wilson7466c292016-08-15 09:49:33 +01002080 u32 rpup, rpupei;
2081 u32 rpdown, rpdownei;
2082
2083 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2084 rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
2085 rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
2086 rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
2087 rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
2088 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2089
2090 seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
Chris Wilson60548c52018-07-31 14:26:29 +01002091 rps_power_to_str(rps->power.mode));
Chris Wilson7466c292016-08-15 09:49:33 +01002092 seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
Chris Wilson23f4a282017-02-18 11:27:08 +00002093 rpup && rpupei ? 100 * rpup / rpupei : 0,
Chris Wilson60548c52018-07-31 14:26:29 +01002094 rps->power.up_threshold);
Chris Wilson7466c292016-08-15 09:49:33 +01002095 seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
Chris Wilson23f4a282017-02-18 11:27:08 +00002096 rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
Chris Wilson60548c52018-07-31 14:26:29 +01002097 rps->power.down_threshold);
Chris Wilson7466c292016-08-15 09:49:33 +01002098 } else {
2099 seq_puts(m, "\nRPS Autotuning inactive\n");
2100 }
2101
Chris Wilson8d3afd72015-05-21 21:01:47 +01002102 return 0;
Chris Wilson1854d5c2015-04-07 16:20:32 +01002103}
2104
Ben Widawsky63573eb2013-07-04 11:02:07 -07002105static int i915_llc(struct seq_file *m, void *data)
2106{
David Weinehall36cdd012016-08-22 13:59:31 +03002107 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Mika Kuoppala3accaf72016-04-13 17:26:43 +03002108 const bool edram = INTEL_GEN(dev_priv) > 8;
Ben Widawsky63573eb2013-07-04 11:02:07 -07002109
David Weinehall36cdd012016-08-22 13:59:31 +03002110 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
Mika Kuoppala3accaf72016-04-13 17:26:43 +03002111 seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2112 intel_uncore_edram_size(dev_priv)/1024/1024);
Ben Widawsky63573eb2013-07-04 11:02:07 -07002113
2114 return 0;
2115}
2116
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002117static int i915_huc_load_status_info(struct seq_file *m, void *data)
2118{
2119 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00002120 intel_wakeref_t wakeref;
Michal Wajdeczko56ffc742017-10-17 09:44:49 +00002121 struct drm_printer p;
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002122
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002123 if (!HAS_HUC(dev_priv))
2124 return -ENODEV;
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002125
Michal Wajdeczko56ffc742017-10-17 09:44:49 +00002126 p = drm_seq_file_printer(m);
2127 intel_uc_fw_dump(&dev_priv->huc.fw, &p);
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002128
Chris Wilsond4225a52019-01-14 14:21:23 +00002129 with_intel_runtime_pm(dev_priv, wakeref)
2130 seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002131
2132 return 0;
2133}
2134
Alex Daifdf5d352015-08-12 15:43:37 +01002135static int i915_guc_load_status_info(struct seq_file *m, void *data)
2136{
David Weinehall36cdd012016-08-22 13:59:31 +03002137 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00002138 intel_wakeref_t wakeref;
Michal Wajdeczko56ffc742017-10-17 09:44:49 +00002139 struct drm_printer p;
Alex Daifdf5d352015-08-12 15:43:37 +01002140
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002141 if (!HAS_GUC(dev_priv))
2142 return -ENODEV;
Alex Daifdf5d352015-08-12 15:43:37 +01002143
Michal Wajdeczko56ffc742017-10-17 09:44:49 +00002144 p = drm_seq_file_printer(m);
2145 intel_uc_fw_dump(&dev_priv->guc.fw, &p);
Alex Daifdf5d352015-08-12 15:43:37 +01002146
Chris Wilsond4225a52019-01-14 14:21:23 +00002147 with_intel_runtime_pm(dev_priv, wakeref) {
2148 u32 tmp = I915_READ(GUC_STATUS);
2149 u32 i;
sagar.a.kamble@intel.com3582ad12017-02-03 13:58:33 +05302150
Chris Wilsond4225a52019-01-14 14:21:23 +00002151 seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2152 seq_printf(m, "\tBootrom status = 0x%x\n",
2153 (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2154 seq_printf(m, "\tuKernel status = 0x%x\n",
2155 (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2156 seq_printf(m, "\tMIA Core status = 0x%x\n",
2157 (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2158 seq_puts(m, "\nScratch registers:\n");
2159 for (i = 0; i < 16; i++) {
2160 seq_printf(m, "\t%2d: \t0x%x\n",
2161 i, I915_READ(SOFT_SCRATCH(i)));
2162 }
2163 }
sagar.a.kamble@intel.com3582ad12017-02-03 13:58:33 +05302164
Alex Daifdf5d352015-08-12 15:43:37 +01002165 return 0;
2166}
2167
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01002168static const char *
2169stringify_guc_log_type(enum guc_log_buffer_type type)
2170{
2171 switch (type) {
2172 case GUC_ISR_LOG_BUFFER:
2173 return "ISR";
2174 case GUC_DPC_LOG_BUFFER:
2175 return "DPC";
2176 case GUC_CRASH_DUMP_LOG_BUFFER:
2177 return "CRASH";
2178 default:
2179 MISSING_CASE(type);
2180 }
2181
2182 return "";
2183}
2184
Akash Goel5aa1ee42016-10-12 21:54:36 +05302185static void i915_guc_log_info(struct seq_file *m,
2186 struct drm_i915_private *dev_priv)
2187{
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01002188 struct intel_guc_log *log = &dev_priv->guc.log;
2189 enum guc_log_buffer_type type;
2190
2191 if (!intel_guc_log_relay_enabled(log)) {
2192 seq_puts(m, "GuC log relay disabled\n");
2193 return;
2194 }
Akash Goel5aa1ee42016-10-12 21:54:36 +05302195
Michał Winiarskidb557992018-03-19 10:53:43 +01002196 seq_puts(m, "GuC logging stats:\n");
Akash Goel5aa1ee42016-10-12 21:54:36 +05302197
Michał Winiarski6a96be22018-03-19 10:53:42 +01002198 seq_printf(m, "\tRelay full count: %u\n",
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01002199 log->relay.full_count);
2200
2201 for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
2202 seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
2203 stringify_guc_log_type(type),
2204 log->stats[type].flush,
2205 log->stats[type].sampled_overflow);
2206 }
Akash Goel5aa1ee42016-10-12 21:54:36 +05302207}
2208
Dave Gordon8b417c22015-08-12 15:43:44 +01002209static void i915_guc_client_info(struct seq_file *m,
2210 struct drm_i915_private *dev_priv,
Sagar Arun Kamble5afc8b42017-11-16 19:02:40 +05302211 struct intel_guc_client *client)
Dave Gordon8b417c22015-08-12 15:43:44 +01002212{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002213 struct intel_engine_cs *engine;
Dave Gordonc18468c2016-08-09 15:19:22 +01002214 enum intel_engine_id id;
Jani Nikulae5315212019-01-16 11:15:23 +02002215 u64 tot = 0;
Dave Gordon8b417c22015-08-12 15:43:44 +01002216
Oscar Mateob09935a2017-03-22 10:39:53 -07002217 seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2218 client->priority, client->stage_id, client->proc_desc_offset);
Michał Winiarski59db36c2017-09-14 12:51:23 +02002219 seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2220 client->doorbell_id, client->doorbell_offset);
Dave Gordon8b417c22015-08-12 15:43:44 +01002221
Akash Goel3b3f1652016-10-13 22:44:48 +05302222 for_each_engine(engine, dev_priv, id) {
Dave Gordonc18468c2016-08-09 15:19:22 +01002223 u64 submissions = client->submissions[id];
2224 tot += submissions;
Dave Gordon8b417c22015-08-12 15:43:44 +01002225 seq_printf(m, "\tSubmissions: %llu %s\n",
Dave Gordonc18468c2016-08-09 15:19:22 +01002226 submissions, engine->name);
Dave Gordon8b417c22015-08-12 15:43:44 +01002227 }
2228 seq_printf(m, "\tTotal: %llu\n", tot);
2229}
2230
2231static int i915_guc_info(struct seq_file *m, void *data)
2232{
David Weinehall36cdd012016-08-22 13:59:31 +03002233 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilson334636c2016-11-29 12:10:20 +00002234 const struct intel_guc *guc = &dev_priv->guc;
Dave Gordon8b417c22015-08-12 15:43:44 +01002235
Michał Winiarskidb557992018-03-19 10:53:43 +01002236 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002237 return -ENODEV;
2238
Michał Winiarskidb557992018-03-19 10:53:43 +01002239 i915_guc_log_info(m, dev_priv);
2240
2241 if (!USES_GUC_SUBMISSION(dev_priv))
2242 return 0;
2243
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002244 GEM_BUG_ON(!guc->execbuf_client);
Dave Gordon8b417c22015-08-12 15:43:44 +01002245
Michał Winiarskidb557992018-03-19 10:53:43 +01002246 seq_printf(m, "\nDoorbell map:\n");
Joonas Lahtinenabddffd2017-03-22 10:39:44 -07002247 seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
Michał Winiarskidb557992018-03-19 10:53:43 +01002248 seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
Dave Gordon9636f6d2016-06-13 17:57:28 +01002249
Chris Wilson334636c2016-11-29 12:10:20 +00002250 seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2251 i915_guc_client_info(m, dev_priv, guc->execbuf_client);
Chris Wilsone78c9172018-02-07 21:05:42 +00002252 if (guc->preempt_client) {
2253 seq_printf(m, "\nGuC preempt client @ %p:\n",
2254 guc->preempt_client);
2255 i915_guc_client_info(m, dev_priv, guc->preempt_client);
2256 }
Dave Gordon8b417c22015-08-12 15:43:44 +01002257
2258 /* Add more as required ... */
2259
2260 return 0;
2261}
2262
Oscar Mateoa8b93702017-05-10 15:04:51 +00002263static int i915_guc_stage_pool(struct seq_file *m, void *data)
Alex Dai4c7e77f2015-08-12 15:43:40 +01002264{
David Weinehall36cdd012016-08-22 13:59:31 +03002265 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Oscar Mateoa8b93702017-05-10 15:04:51 +00002266 const struct intel_guc *guc = &dev_priv->guc;
2267 struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
Sagar Arun Kamble5afc8b42017-11-16 19:02:40 +05302268 struct intel_guc_client *client = guc->execbuf_client;
Oscar Mateoa8b93702017-05-10 15:04:51 +00002269 unsigned int tmp;
2270 int index;
Alex Dai4c7e77f2015-08-12 15:43:40 +01002271
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002272 if (!USES_GUC_SUBMISSION(dev_priv))
2273 return -ENODEV;
Alex Dai4c7e77f2015-08-12 15:43:40 +01002274
Oscar Mateoa8b93702017-05-10 15:04:51 +00002275 for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
2276 struct intel_engine_cs *engine;
Alex Dai4c7e77f2015-08-12 15:43:40 +01002277
Oscar Mateoa8b93702017-05-10 15:04:51 +00002278 if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
2279 continue;
Alex Dai4c7e77f2015-08-12 15:43:40 +01002280
Oscar Mateoa8b93702017-05-10 15:04:51 +00002281 seq_printf(m, "GuC stage descriptor %u:\n", index);
2282 seq_printf(m, "\tIndex: %u\n", desc->stage_id);
2283 seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
2284 seq_printf(m, "\tPriority: %d\n", desc->priority);
2285 seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
2286 seq_printf(m, "\tEngines used: 0x%x\n",
2287 desc->engines_used);
2288 seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
2289 desc->db_trigger_phy,
2290 desc->db_trigger_cpu,
2291 desc->db_trigger_uk);
2292 seq_printf(m, "\tProcess descriptor: 0x%x\n",
2293 desc->process_desc);
Colin Ian King9a094852017-05-16 10:22:35 +01002294 seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
Oscar Mateoa8b93702017-05-10 15:04:51 +00002295 desc->wq_addr, desc->wq_size);
2296 seq_putc(m, '\n');
2297
2298 for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
2299 u32 guc_engine_id = engine->guc_id;
2300 struct guc_execlist_context *lrc =
2301 &desc->lrc[guc_engine_id];
2302
2303 seq_printf(m, "\t%s LRC:\n", engine->name);
2304 seq_printf(m, "\t\tContext desc: 0x%x\n",
2305 lrc->context_desc);
2306 seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
2307 seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
2308 seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
2309 seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
2310 seq_putc(m, '\n');
2311 }
Alex Dai4c7e77f2015-08-12 15:43:40 +01002312 }
2313
Oscar Mateoa8b93702017-05-10 15:04:51 +00002314 return 0;
2315}
2316
Alex Dai4c7e77f2015-08-12 15:43:40 +01002317static int i915_guc_log_dump(struct seq_file *m, void *data)
2318{
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002319 struct drm_info_node *node = m->private;
2320 struct drm_i915_private *dev_priv = node_to_i915(node);
2321 bool dump_load_err = !!node->info_ent->data;
2322 struct drm_i915_gem_object *obj = NULL;
2323 u32 *log;
2324 int i = 0;
Alex Dai4c7e77f2015-08-12 15:43:40 +01002325
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002326 if (!HAS_GUC(dev_priv))
2327 return -ENODEV;
2328
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002329 if (dump_load_err)
2330 obj = dev_priv->guc.load_err_log;
2331 else if (dev_priv->guc.log.vma)
2332 obj = dev_priv->guc.log.vma->obj;
2333
2334 if (!obj)
Alex Dai4c7e77f2015-08-12 15:43:40 +01002335 return 0;
2336
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002337 log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2338 if (IS_ERR(log)) {
2339 DRM_DEBUG("Failed to pin object\n");
2340 seq_puts(m, "(log data unaccessible)\n");
2341 return PTR_ERR(log);
Alex Dai4c7e77f2015-08-12 15:43:40 +01002342 }
2343
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002344 for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2345 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2346 *(log + i), *(log + i + 1),
2347 *(log + i + 2), *(log + i + 3));
2348
Alex Dai4c7e77f2015-08-12 15:43:40 +01002349 seq_putc(m, '\n');
2350
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002351 i915_gem_object_unpin_map(obj);
2352
Alex Dai4c7e77f2015-08-12 15:43:40 +01002353 return 0;
2354}
2355
Michał Winiarski4977a282018-03-19 10:53:40 +01002356static int i915_guc_log_level_get(void *data, u64 *val)
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302357{
Chris Wilsonbcc36d82017-04-07 20:42:20 +01002358 struct drm_i915_private *dev_priv = data;
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302359
Michał Winiarski86aa8242018-03-08 16:46:53 +01002360 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002361 return -ENODEV;
2362
Piotr Piórkowski50935ac2018-06-04 16:19:41 +02002363 *val = intel_guc_log_get_level(&dev_priv->guc.log);
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302364
2365 return 0;
2366}
2367
Michał Winiarski4977a282018-03-19 10:53:40 +01002368static int i915_guc_log_level_set(void *data, u64 val)
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302369{
Chris Wilsonbcc36d82017-04-07 20:42:20 +01002370 struct drm_i915_private *dev_priv = data;
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302371
Michał Winiarski86aa8242018-03-08 16:46:53 +01002372 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002373 return -ENODEV;
2374
Piotr Piórkowski50935ac2018-06-04 16:19:41 +02002375 return intel_guc_log_set_level(&dev_priv->guc.log, val);
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302376}
2377
Michał Winiarski4977a282018-03-19 10:53:40 +01002378DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
2379 i915_guc_log_level_get, i915_guc_log_level_set,
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302380 "%lld\n");
2381
Michał Winiarski4977a282018-03-19 10:53:40 +01002382static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2383{
2384 struct drm_i915_private *dev_priv = inode->i_private;
2385
2386 if (!USES_GUC(dev_priv))
2387 return -ENODEV;
2388
2389 file->private_data = &dev_priv->guc.log;
2390
2391 return intel_guc_log_relay_open(&dev_priv->guc.log);
2392}
2393
2394static ssize_t
2395i915_guc_log_relay_write(struct file *filp,
2396 const char __user *ubuf,
2397 size_t cnt,
2398 loff_t *ppos)
2399{
2400 struct intel_guc_log *log = filp->private_data;
2401
2402 intel_guc_log_relay_flush(log);
2403
2404 return cnt;
2405}
2406
2407static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2408{
2409 struct drm_i915_private *dev_priv = inode->i_private;
2410
2411 intel_guc_log_relay_close(&dev_priv->guc.log);
2412
2413 return 0;
2414}
2415
/* File operations for the GuC log relay debugfs entry. */
static const struct file_operations i915_guc_log_relay_fops = {
	.owner = THIS_MODULE,
	.open = i915_guc_log_relay_open,
	.write = i915_guc_log_relay_write,
	.release = i915_guc_log_relay_release,
};
2422
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002423static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2424{
2425 u8 val;
2426 static const char * const sink_status[] = {
2427 "inactive",
2428 "transition to active, capture and display",
2429 "active, display from RFB",
2430 "active, capture and display on sink device timings",
2431 "transition to inactive, capture and display, timing re-sync",
2432 "reserved",
2433 "reserved",
2434 "sink internal error",
2435 };
2436 struct drm_connector *connector = m->private;
Rodrigo Vivi7a72c782018-07-19 17:31:55 -07002437 struct drm_i915_private *dev_priv = to_i915(connector->dev);
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002438 struct intel_dp *intel_dp =
2439 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
Rodrigo Vivi7a72c782018-07-19 17:31:55 -07002440 int ret;
2441
2442 if (!CAN_PSR(dev_priv)) {
2443 seq_puts(m, "PSR Unsupported\n");
2444 return -ENODEV;
2445 }
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002446
2447 if (connector->status != connector_status_connected)
2448 return -ENODEV;
2449
Rodrigo Vivi7a72c782018-07-19 17:31:55 -07002450 ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2451
2452 if (ret == 1) {
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002453 const char *str = "unknown";
2454
2455 val &= DP_PSR_SINK_STATE_MASK;
2456 if (val < ARRAY_SIZE(sink_status))
2457 str = sink_status[val];
2458 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2459 } else {
Rodrigo Vivi7a72c782018-07-19 17:31:55 -07002460 return ret;
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002461 }
2462
2463 return 0;
2464}
2465DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2466
Vathsala Nagaraju00b06292018-06-27 13:38:30 +05302467static void
2468psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
Chris Wilsonb86bef202017-01-16 13:06:21 +00002469{
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002470 u32 val, status_val;
2471 const char *status = "unknown";
Chris Wilsonb86bef202017-01-16 13:06:21 +00002472
Vathsala Nagaraju00b06292018-06-27 13:38:30 +05302473 if (dev_priv->psr.psr2_enabled) {
2474 static const char * const live_status[] = {
2475 "IDLE",
2476 "CAPTURE",
2477 "CAPTURE_FS",
2478 "SLEEP",
2479 "BUFON_FW",
2480 "ML_UP",
2481 "SU_STANDBY",
2482 "FAST_SLEEP",
2483 "DEEP_SLEEP",
2484 "BUF_ON",
2485 "TG_ON"
2486 };
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002487 val = I915_READ(EDP_PSR2_STATUS);
2488 status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
2489 EDP_PSR2_STATUS_STATE_SHIFT;
2490 if (status_val < ARRAY_SIZE(live_status))
2491 status = live_status[status_val];
Vathsala Nagaraju00b06292018-06-27 13:38:30 +05302492 } else {
2493 static const char * const live_status[] = {
2494 "IDLE",
2495 "SRDONACK",
2496 "SRDENT",
2497 "BUFOFF",
2498 "BUFON",
2499 "AUXACK",
2500 "SRDOFFACK",
2501 "SRDENT_ON",
2502 };
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002503 val = I915_READ(EDP_PSR_STATUS);
2504 status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
2505 EDP_PSR_STATUS_STATE_SHIFT;
2506 if (status_val < ARRAY_SIZE(live_status))
2507 status = live_status[status_val];
Vathsala Nagaraju00b06292018-06-27 13:38:30 +05302508 }
Chris Wilsonb86bef202017-01-16 13:06:21 +00002509
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002510 seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
Chris Wilsonb86bef202017-01-16 13:06:21 +00002511}
2512
/*
 * debugfs read handler: dump eDP PSR (Panel Self Refresh) state.
 *
 * Prints sink capability, the current PSR1/PSR2 mode, the source control
 * register, busy frontbuffer bits, the HSW/BDW performance counter, optional
 * IRQ debug timestamps and, for PSR2, per-frame selective-update block counts.
 * Returns 0, or -ENODEV when the platform has no PSR support at all.
 */
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_psr *psr = &dev_priv->psr;
	intel_wakeref_t wakeref;
	const char *status;
	bool enabled;
	u32 val;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	/* Sink capability can be reported even without source support. */
	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
	if (psr->dp)
		seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
	seq_puts(m, "\n");

	if (!psr->sink_support)
		return 0;

	/* Hold a wakeref across the MMIO reads; psr->lock guards psr state. */
	wakeref = intel_runtime_pm_get(dev_priv);
	mutex_lock(&psr->lock);

	if (psr->enabled)
		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
	else
		status = "disabled";
	seq_printf(m, "PSR mode: %s\n", status);

	if (!psr->enabled)
		goto unlock;

	/* Read the enable bit from whichever control register is active. */
	if (psr->psr2_enabled) {
		val = I915_READ(EDP_PSR2_CTL);
		enabled = val & EDP_PSR2_ENABLE;
	} else {
		val = I915_READ(EDP_PSR_CTL);
		enabled = val & EDP_PSR_ENABLE;
	}
	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
		   enableddisabled(enabled), val);
	psr_source_status(dev_priv, m);
	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
		   psr->busy_frontbuffer_bits);

	/*
	 * SKL+ Perf counter is reset to 0 every time DC state is entered,
	 * so only report it on HSW/BDW where it stays meaningful.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
		seq_printf(m, "Performance counter: %u\n", val);
	}

	/* Entry/exit timestamps are only tracked with IRQ debugging on. */
	if (psr->debug & I915_PSR_DEBUG_IRQ) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   psr->last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
	}

	if (psr->psr2_enabled) {
		u32 su_frames_val[3];
		int frame;

		/*
		 * Reading all 3 registers before hand to minimize crossing a
		 * frame boundary between register reads
		 */
		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
			su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));

		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");

		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
			u32 su_blocks;

			su_blocks = su_frames_val[frame / 3] &
				    PSR2_SU_STATUS_MASK(frame);
			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
			seq_printf(m, "%d\t%d\n", frame, su_blocks);
		}
	}

unlock:
	mutex_unlock(&psr->lock);
	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
2601
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002602static int
2603i915_edp_psr_debug_set(void *data, u64 val)
2604{
2605 struct drm_i915_private *dev_priv = data;
Chris Wilsona0371212019-01-14 14:21:14 +00002606 intel_wakeref_t wakeref;
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002607 int ret;
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002608
2609 if (!CAN_PSR(dev_priv))
2610 return -ENODEV;
2611
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002612 DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002613
Chris Wilsona0371212019-01-14 14:21:14 +00002614 wakeref = intel_runtime_pm_get(dev_priv);
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002615
José Roberto de Souza23ec9f52019-02-06 13:18:45 -08002616 ret = intel_psr_debug_set(dev_priv, val);
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002617
Chris Wilsona0371212019-01-14 14:21:14 +00002618 intel_runtime_pm_put(dev_priv, wakeref);
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002619
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002620 return ret;
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002621}
2622
2623static int
2624i915_edp_psr_debug_get(void *data, u64 *val)
2625{
2626 struct drm_i915_private *dev_priv = data;
2627
2628 if (!CAN_PSR(dev_priv))
2629 return -ENODEV;
2630
2631 *val = READ_ONCE(dev_priv->psr.debug);
2632 return 0;
2633}
2634
/* u64 read/write debugfs attribute backed by the PSR debug get/set above. */
DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");
2638
/*
 * debugfs read handler: report GPU package energy consumption in microjoules.
 *
 * Note that 'power' is deliberately reused: it first holds the raw RAPL
 * power-unit MSR (from which the energy-status-unit exponent is extracted),
 * then the MCH energy status register value that is scaled by that exponent.
 */
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	unsigned long long power;
	intel_wakeref_t wakeref;
	u32 units;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	/* rdmsrl_safe() fails gracefully on hosts without the RAPL MSR. */
	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
		return -ENODEV;

	/* bits 12:8 of MSR_RAPL_POWER_UNIT: energy status units (2^-units J) */
	units = (power & 0x1f00) >> 8;
	with_intel_runtime_pm(dev_priv, wakeref)
		power = I915_READ(MCH_SECP_NRG_STTS);

	power = (1000000 * power) >> units; /* convert to uJ */
	seq_printf(m, "%llu", power);

	return 0;
}
2661
/*
 * debugfs read handler: summarize runtime power-management state — device
 * wakeref status, GT idleness, IRQ state, PM core usage count (when
 * CONFIG_PM is enabled) and the PCI device power state.
 */
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/* Informational only; we still dump the remaining state below. */
	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "Runtime power status: %s\n",
		   enableddisabled(!dev_priv->power_domains.wakeref));

	seq_printf(m, "GPU idle: %s (epoch %u)\n",
		   yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	/* With wakeref debugging built in, dump the outstanding wakerefs. */
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
		struct drm_printer p = drm_seq_file_printer(m);

		print_intel_runtime_pm_wakeref(dev_priv, &p);
	}

	return 0;
}
2695
Imre Deak1da51582013-11-25 17:15:35 +02002696static int i915_power_domain_info(struct seq_file *m, void *unused)
2697{
David Weinehall36cdd012016-08-22 13:59:31 +03002698 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Imre Deak1da51582013-11-25 17:15:35 +02002699 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2700 int i;
2701
2702 mutex_lock(&power_domains->lock);
2703
2704 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2705 for (i = 0; i < power_domains->power_well_count; i++) {
2706 struct i915_power_well *power_well;
2707 enum intel_display_power_domain power_domain;
2708
2709 power_well = &power_domains->power_wells[i];
Imre Deakf28ec6f2018-08-06 12:58:37 +03002710 seq_printf(m, "%-25s %d\n", power_well->desc->name,
Imre Deak1da51582013-11-25 17:15:35 +02002711 power_well->count);
2712
Imre Deakf28ec6f2018-08-06 12:58:37 +03002713 for_each_power_domain(power_domain, power_well->desc->domains)
Imre Deak1da51582013-11-25 17:15:35 +02002714 seq_printf(m, " %-23s %d\n",
Daniel Stone9895ad02015-11-20 15:55:33 +00002715 intel_display_power_domain_str(power_domain),
Imre Deak1da51582013-11-25 17:15:35 +02002716 power_domains->domain_use_count[power_domain]);
Imre Deak1da51582013-11-25 17:15:35 +02002717 }
2718
2719 mutex_unlock(&power_domains->lock);
2720
2721 return 0;
2722}
2723
/*
 * debugfs read handler: report DMC/CSR firmware state — whether a payload
 * is loaded, its path and version, and the DC-state transition counters.
 * The program-base/SSP/HTP registers are printed on every exit path.
 */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv))
		return -ENODEV;

	csr = &dev_priv->csr;

	/* Keep the device awake for the register reads below. */
	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	/* No payload: skip version/counters but still dump the registers. */
	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	/* Counter register layout is only known up to gen11. */
	if (WARN_ON(INTEL_GEN(dev_priv) > 11))
		goto out;

	seq_printf(m, "DC3 -> DC5 count: %d\n",
		   I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
						    SKL_CSR_DC3_DC5_COUNT));
	if (!IS_GEN9_LP(dev_priv))
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
2765
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002766static void intel_seq_print_mode(struct seq_file *m, int tabs,
2767 struct drm_display_mode *mode)
2768{
2769 int i;
2770
2771 for (i = 0; i < tabs; i++)
2772 seq_putc(m, '\t');
2773
Shayenne Moura4fb6bb82018-12-20 10:27:57 -02002774 seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002775}
2776
2777static void intel_encoder_info(struct seq_file *m,
2778 struct intel_crtc *intel_crtc,
2779 struct intel_encoder *intel_encoder)
2780{
David Weinehall36cdd012016-08-22 13:59:31 +03002781 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2782 struct drm_device *dev = &dev_priv->drm;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002783 struct drm_crtc *crtc = &intel_crtc->base;
2784 struct intel_connector *intel_connector;
2785 struct drm_encoder *encoder;
2786
2787 encoder = &intel_encoder->base;
2788 seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
Jani Nikula8e329a032014-06-03 14:56:21 +03002789 encoder->base.id, encoder->name);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002790 for_each_connector_on_encoder(dev, encoder, intel_connector) {
2791 struct drm_connector *connector = &intel_connector->base;
2792 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2793 connector->base.id,
Jani Nikulac23cc412014-06-03 14:56:17 +03002794 connector->name,
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002795 drm_get_connector_status_name(connector->status));
2796 if (connector->status == connector_status_connected) {
2797 struct drm_display_mode *mode = &crtc->mode;
2798 seq_printf(m, ", mode:\n");
2799 intel_seq_print_mode(m, 2, mode);
2800 } else {
2801 seq_putc(m, '\n');
2802 }
2803 }
2804}
2805
2806static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2807{
David Weinehall36cdd012016-08-22 13:59:31 +03002808 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2809 struct drm_device *dev = &dev_priv->drm;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002810 struct drm_crtc *crtc = &intel_crtc->base;
2811 struct intel_encoder *intel_encoder;
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02002812 struct drm_plane_state *plane_state = crtc->primary->state;
2813 struct drm_framebuffer *fb = plane_state->fb;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002814
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02002815 if (fb)
Matt Roper5aa8a932014-06-16 10:12:55 -07002816 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02002817 fb->base.id, plane_state->src_x >> 16,
2818 plane_state->src_y >> 16, fb->width, fb->height);
Matt Roper5aa8a932014-06-16 10:12:55 -07002819 else
2820 seq_puts(m, "\tprimary plane disabled\n");
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002821 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2822 intel_encoder_info(m, intel_crtc, intel_encoder);
2823}
2824
2825static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2826{
2827 struct drm_display_mode *mode = panel->fixed_mode;
2828
2829 seq_printf(m, "\tfixed mode:\n");
2830 intel_seq_print_mode(m, 2, mode);
2831}
2832
2833static void intel_dp_info(struct seq_file *m,
2834 struct intel_connector *intel_connector)
2835{
2836 struct intel_encoder *intel_encoder = intel_connector->encoder;
2837 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2838
2839 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
Jani Nikula742f4912015-09-03 11:16:09 +03002840 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02002841 if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002842 intel_panel_info(m, &intel_connector->panel);
Mika Kahola80209e52016-09-09 14:10:57 +03002843
2844 drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2845 &intel_dp->aux);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002846}
2847
Libin Yang9a148a92016-11-28 20:07:05 +08002848static void intel_dp_mst_info(struct seq_file *m,
2849 struct intel_connector *intel_connector)
2850{
2851 struct intel_encoder *intel_encoder = intel_connector->encoder;
2852 struct intel_dp_mst_encoder *intel_mst =
2853 enc_to_mst(&intel_encoder->base);
2854 struct intel_digital_port *intel_dig_port = intel_mst->primary;
2855 struct intel_dp *intel_dp = &intel_dig_port->dp;
2856 bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2857 intel_connector->port);
2858
2859 seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2860}
2861
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002862static void intel_hdmi_info(struct seq_file *m,
2863 struct intel_connector *intel_connector)
2864{
2865 struct intel_encoder *intel_encoder = intel_connector->encoder;
2866 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2867
Jani Nikula742f4912015-09-03 11:16:09 +03002868 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002869}
2870
/* LVDS-specific connector details: currently just the fixed panel mode. */
static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
2876
/*
 * Print everything known about one connector: identity and status, then —
 * only when connected — display info, the encoder-type-specific details
 * (DP/MST, LVDS or HDMI), and finally the probed mode list.
 */
static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));

	/* Nothing more to report for a disconnected connector. */
	if (connector->status == connector_status_disconnected)
		return;

	seq_printf(m, "\tname: %s\n", connector->display_info.name);
	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
		   connector->display_info.width_mm,
		   connector->display_info.height_mm);
	seq_printf(m, "\tsubpixel order: %s\n",
		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);

	/* A connector may have no encoder attached yet. */
	if (!intel_encoder)
		return;

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
			intel_dp_mst_info(m, intel_connector);
		else
			intel_dp_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		/* HDMI may sit behind a dedicated HDMI encoder or a DDI one. */
		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
		    intel_encoder->type == INTEL_OUTPUT_DDI)
			intel_hdmi_info(m, intel_connector);
		break;
	default:
		break;
	}

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}
2927
Robert Fekete3abc4e02015-10-27 16:58:32 +01002928static const char *plane_type(enum drm_plane_type type)
2929{
2930 switch (type) {
2931 case DRM_PLANE_TYPE_OVERLAY:
2932 return "OVL";
2933 case DRM_PLANE_TYPE_PRIMARY:
2934 return "PRI";
2935 case DRM_PLANE_TYPE_CURSOR:
2936 return "CUR";
2937 /*
2938 * Deliberately omitting default: to generate compiler warnings
2939 * when a new drm_plane_type gets added.
2940 */
2941 }
2942
2943 return "unknown";
2944}
2945
/*
 * Format a plane's rotation/reflection bitmask into buf (bufsize bytes),
 * e.g. "90 FLIPX (0x00000012)".
 */
static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
{
	/*
	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused
	 */
	snprintf(buf, bufsize,
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
		 rotation);
}
2962
/*
 * Print one line per plane on the CRTC: id, type, CRTC position/size,
 * source position/size, pixel format and rotation.
 */
static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_plane *intel_plane;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane_state *state;
		struct drm_plane *plane = &intel_plane->base;
		struct drm_format_name_buf format_name;
		char rot_str[48];

		if (!plane->state) {
			seq_puts(m, "plane->state is NULL!\n");
			continue;
		}

		state = plane->state;

		if (state->fb) {
			drm_get_format_name(state->fb->format->format,
					    &format_name);
		} else {
			/* No framebuffer bound, so no format to name. */
			sprintf(format_name.str, "N/A");
		}

		plane_rotation(rot_str, sizeof(rot_str), state->rotation);

		/*
		 * src_* coordinates are 16.16 fixed point; the integer part is
		 * the >>16, and the (frac * 15625) >> 10 term renders the
		 * fractional part as a decimal fraction.
		 */
		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
			   plane->base.id,
			   plane_type(intel_plane->base.type),
			   state->crtc_x, state->crtc_y,
			   state->crtc_w, state->crtc_h,
			   (state->src_x >> 16),
			   ((state->src_x & 0xffff) * 15625) >> 10,
			   (state->src_y >> 16),
			   ((state->src_y & 0xffff) * 15625) >> 10,
			   (state->src_w >> 16),
			   ((state->src_w & 0xffff) * 15625) >> 10,
			   (state->src_h >> 16),
			   ((state->src_h & 0xffff) * 15625) >> 10,
			   format_name.str,
			   rot_str);
	}
}
3008
/*
 * Print scaler usage for the CRTC: overall user/id state plus per-scaler
 * in-use flag and mode, all on one line.
 */
static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct intel_crtc_state *pipe_config;
	int num_scalers = intel_crtc->num_scalers;
	int i;

	pipe_config = to_intel_crtc_state(intel_crtc->base.state);

	/* Not all platforms have a scaler */
	if (num_scalers) {
		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
			   num_scalers,
			   pipe_config->scaler_state.scaler_users,
			   pipe_config->scaler_state.scaler_id);

		for (i = 0; i < num_scalers; i++) {
			struct intel_scaler *sc =
				&pipe_config->scaler_state.scalers[i];

			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
				   i, yesno(sc->in_use), sc->mode);
		}
		seq_puts(m, "\n");
	} else {
		seq_puts(m, "\tNo scalers available on this platform\n");
	}
}
3036
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003037static int i915_display_info(struct seq_file *m, void *unused)
3038{
David Weinehall36cdd012016-08-22 13:59:31 +03003039 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3040 struct drm_device *dev = &dev_priv->drm;
Chris Wilson065f2ec22014-03-12 09:13:13 +00003041 struct intel_crtc *crtc;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003042 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003043 struct drm_connector_list_iter conn_iter;
Chris Wilsona0371212019-01-14 14:21:14 +00003044 intel_wakeref_t wakeref;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003045
Chris Wilsona0371212019-01-14 14:21:14 +00003046 wakeref = intel_runtime_pm_get(dev_priv);
3047
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003048 seq_printf(m, "CRTC info\n");
3049 seq_printf(m, "---------\n");
Damien Lespiaud3fcc802014-05-13 23:32:22 +01003050 for_each_intel_crtc(dev, crtc) {
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003051 struct intel_crtc_state *pipe_config;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003052
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003053 drm_modeset_lock(&crtc->base.mutex, NULL);
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003054 pipe_config = to_intel_crtc_state(crtc->base.state);
3055
Robert Fekete3abc4e02015-10-27 16:58:32 +01003056 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
Chris Wilson065f2ec22014-03-12 09:13:13 +00003057 crtc->base.base.id, pipe_name(crtc->pipe),
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003058 yesno(pipe_config->base.active),
Robert Fekete3abc4e02015-10-27 16:58:32 +01003059 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3060 yesno(pipe_config->dither), pipe_config->pipe_bpp);
3061
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003062 if (pipe_config->base.active) {
Ville Syrjäläcd5dcbf2017-03-27 21:55:35 +03003063 struct intel_plane *cursor =
3064 to_intel_plane(crtc->base.cursor);
3065
Chris Wilson065f2ec22014-03-12 09:13:13 +00003066 intel_crtc_info(m, crtc);
3067
Ville Syrjäläcd5dcbf2017-03-27 21:55:35 +03003068 seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
3069 yesno(cursor->base.state->visible),
3070 cursor->base.state->crtc_x,
3071 cursor->base.state->crtc_y,
3072 cursor->base.state->crtc_w,
3073 cursor->base.state->crtc_h,
3074 cursor->cursor.base);
Robert Fekete3abc4e02015-10-27 16:58:32 +01003075 intel_scaler_info(m, crtc);
3076 intel_plane_info(m, crtc);
Paulo Zanonia23dc652014-04-01 14:55:11 -03003077 }
Daniel Vettercace8412014-05-22 17:56:31 +02003078
3079 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
3080 yesno(!crtc->cpu_fifo_underrun_disabled),
3081 yesno(!crtc->pch_fifo_underrun_disabled));
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003082 drm_modeset_unlock(&crtc->base.mutex);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003083 }
3084
3085 seq_printf(m, "\n");
3086 seq_printf(m, "Connector info\n");
3087 seq_printf(m, "--------------\n");
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003088 mutex_lock(&dev->mode_config.mutex);
3089 drm_connector_list_iter_begin(dev, &conn_iter);
3090 drm_for_each_connector_iter(connector, &conn_iter)
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003091 intel_connector_info(m, connector);
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003092 drm_connector_list_iter_end(&conn_iter);
3093 mutex_unlock(&dev->mode_config.mutex);
3094
Chris Wilsona0371212019-01-14 14:21:14 +00003095 intel_runtime_pm_put(dev_priv, wakeref);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003096
3097 return 0;
3098}
3099
/*
 * debugfs read handler: GT-wide summary (awake state, active request count,
 * CS timestamp frequency) followed by a detailed dump of every engine.
 */
static int i915_engine_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	enum intel_engine_id id;
	struct drm_printer p;

	/* Keep the device awake so engine state can be read coherently. */
	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "GT awake? %s (epoch %u)\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
	seq_printf(m, "Global active requests: %d\n",
		   dev_priv->gt.active_requests);
	seq_printf(m, "CS timestamp frequency: %u kHz\n",
		   RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);

	p = drm_seq_file_printer(m);
	for_each_engine(engine, dev_priv, id)
		intel_engine_dump(engine, &p, "%s\n", engine->name);

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
3125
Lionel Landwerlin79e9cd52018-03-06 12:28:54 +00003126static int i915_rcs_topology(struct seq_file *m, void *unused)
3127{
3128 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3129 struct drm_printer p = drm_seq_file_printer(m);
3130
Jani Nikula02584042018-12-31 16:56:41 +02003131 intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
Lionel Landwerlin79e9cd52018-03-06 12:28:54 +00003132
3133 return 0;
3134}
3135
Chris Wilsonc5418a82017-10-13 21:26:19 +01003136static int i915_shrinker_info(struct seq_file *m, void *unused)
3137{
3138 struct drm_i915_private *i915 = node_to_i915(m->private);
3139
3140 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3141 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3142
3143 return 0;
3144}
3145
/*
 * debugfs read handler: dump every shared DPLL — identity, which CRTCs use
 * it, whether it is on, and the complete tracked hardware state including
 * the ICL MG PHY PLL registers.
 */
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i;

	/* Take all modeset locks so the DPLL state cannot change mid-dump. */
	drm_modeset_lock_all(dev);
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
			   pll->info->id);
		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
		seq_printf(m, " tracked hardware state:\n");
		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
		seq_printf(m, " dpll_md: 0x%08x\n",
			   pll->state.hw_state.dpll_md);
		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
			   pll->state.hw_state.mg_refclkin_ctl);
		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
			   pll->state.hw_state.mg_clktop2_coreclkctl1);
		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
			   pll->state.hw_state.mg_clktop2_hsclkctl);
		seq_printf(m, " mg_pll_div0:  0x%08x\n",
			   pll->state.hw_state.mg_pll_div0);
		seq_printf(m, " mg_pll_div1:  0x%08x\n",
			   pll->state.hw_state.mg_pll_div1);
		seq_printf(m, " mg_pll_lf:    0x%08x\n",
			   pll->state.hw_state.mg_pll_lf);
		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
			   pll->state.hw_state.mg_pll_frac_lock);
		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
			   pll->state.hw_state.mg_pll_ssc);
		seq_printf(m, " mg_pll_bias:  0x%08x\n",
			   pll->state.hw_state.mg_pll_bias);
		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
	}
	drm_modeset_unlock_all(dev);

	return 0;
}
3194
Damien Lespiau1ed1ef92014-08-30 16:50:59 +01003195static int i915_wa_registers(struct seq_file *m, void *unused)
Arun Siluvery888b5992014-08-26 14:44:51 +01003196{
Tvrtko Ursulin452420d2018-12-03 13:33:57 +00003197 struct drm_i915_private *i915 = node_to_i915(m->private);
3198 const struct i915_wa_list *wal = &i915->engine[RCS]->ctx_wa_list;
3199 struct i915_wa *wa;
3200 unsigned int i;
Arun Siluvery888b5992014-08-26 14:44:51 +01003201
Tvrtko Ursulin452420d2018-12-03 13:33:57 +00003202 seq_printf(m, "Workarounds applied: %u\n", wal->count);
3203 for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
Chris Wilson548764b2018-06-15 13:02:07 +01003204 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
Tvrtko Ursulin452420d2018-12-03 13:33:57 +00003205 i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);
Arun Siluvery888b5992014-08-26 14:44:51 +01003206
3207 return 0;
3208}
3209
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05303210static int i915_ipc_status_show(struct seq_file *m, void *data)
3211{
3212 struct drm_i915_private *dev_priv = m->private;
3213
3214 seq_printf(m, "Isochronous Priority Control: %s\n",
3215 yesno(dev_priv->ipc_enabled));
3216 return 0;
3217}
3218
3219static int i915_ipc_status_open(struct inode *inode, struct file *file)
3220{
3221 struct drm_i915_private *dev_priv = inode->i_private;
3222
3223 if (!HAS_IPC(dev_priv))
3224 return -ENODEV;
3225
3226 return single_open(file, i915_ipc_status_show, dev_priv);
3227}
3228
3229static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
3230 size_t len, loff_t *offp)
3231{
3232 struct seq_file *m = file->private_data;
3233 struct drm_i915_private *dev_priv = m->private;
Chris Wilsona0371212019-01-14 14:21:14 +00003234 intel_wakeref_t wakeref;
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05303235 bool enable;
Chris Wilsond4225a52019-01-14 14:21:23 +00003236 int ret;
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05303237
3238 ret = kstrtobool_from_user(ubuf, len, &enable);
3239 if (ret < 0)
3240 return ret;
3241
Chris Wilsond4225a52019-01-14 14:21:23 +00003242 with_intel_runtime_pm(dev_priv, wakeref) {
3243 if (!dev_priv->ipc_enabled && enable)
3244 DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
3245 dev_priv->wm.distrust_bios_wm = true;
3246 dev_priv->ipc_enabled = enable;
3247 intel_enable_ipc(dev_priv);
3248 }
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05303249
3250 return len;
3251}
3252
3253static const struct file_operations i915_ipc_status_fops = {
3254 .owner = THIS_MODULE,
3255 .open = i915_ipc_status_open,
3256 .read = seq_read,
3257 .llseek = seq_lseek,
3258 .release = single_release,
3259 .write = i915_ipc_status_write
3260};
3261
Damien Lespiauc5511e42014-11-04 17:06:51 +00003262static int i915_ddb_info(struct seq_file *m, void *unused)
3263{
David Weinehall36cdd012016-08-22 13:59:31 +03003264 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3265 struct drm_device *dev = &dev_priv->drm;
Damien Lespiauc5511e42014-11-04 17:06:51 +00003266 struct skl_ddb_entry *entry;
Ville Syrjäläff43bc32018-11-27 18:59:00 +02003267 struct intel_crtc *crtc;
Damien Lespiauc5511e42014-11-04 17:06:51 +00003268
David Weinehall36cdd012016-08-22 13:59:31 +03003269 if (INTEL_GEN(dev_priv) < 9)
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00003270 return -ENODEV;
Damien Lespiau2fcffe12014-12-03 17:33:24 +00003271
Damien Lespiauc5511e42014-11-04 17:06:51 +00003272 drm_modeset_lock_all(dev);
3273
Damien Lespiauc5511e42014-11-04 17:06:51 +00003274 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3275
Ville Syrjäläff43bc32018-11-27 18:59:00 +02003276 for_each_intel_crtc(&dev_priv->drm, crtc) {
3277 struct intel_crtc_state *crtc_state =
3278 to_intel_crtc_state(crtc->base.state);
3279 enum pipe pipe = crtc->pipe;
3280 enum plane_id plane_id;
3281
Damien Lespiauc5511e42014-11-04 17:06:51 +00003282 seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3283
Ville Syrjäläff43bc32018-11-27 18:59:00 +02003284 for_each_plane_id_on_crtc(crtc, plane_id) {
3285 entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
3286 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
Damien Lespiauc5511e42014-11-04 17:06:51 +00003287 entry->start, entry->end,
3288 skl_ddb_entry_size(entry));
3289 }
3290
Ville Syrjäläff43bc32018-11-27 18:59:00 +02003291 entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
Damien Lespiauc5511e42014-11-04 17:06:51 +00003292 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
3293 entry->end, skl_ddb_entry_size(entry));
3294 }
3295
3296 drm_modeset_unlock_all(dev);
3297
3298 return 0;
3299}
3300
/*
 * Print the DRRS (Display Refresh Rate Switching) status for one CRTC:
 * the connectors it drives, the VBT-declared DRRS type, and - when the
 * current crtc state supports DRRS - the live refresh-rate state read
 * under drrs->mutex.
 */
static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* List every connector currently assigned to this CRTC. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);
	}
	drm_connector_list_iter_end(&conn_iter);

	/* DRRS capability as declared by the VBT. */
	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		/* drrs->dp and refresh_rate_type are protected by the mutex. */
		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled\n");
			if (dev_priv->psr.enabled)
				seq_puts(m,
				"\tAs PSR is enabled, DRRS is not enabled\n");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
					drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		/* Pick the vrefresh matching the active refresh-rate mode. */
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
						drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter*/
		seq_puts(m, "\tDRRS Supported : No");
	}
	seq_puts(m, "\n");
}
3375
3376static int i915_drrs_status(struct seq_file *m, void *unused)
3377{
David Weinehall36cdd012016-08-22 13:59:31 +03003378 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3379 struct drm_device *dev = &dev_priv->drm;
Vandana Kannana54746e2015-03-03 20:53:10 +05303380 struct intel_crtc *intel_crtc;
3381 int active_crtc_cnt = 0;
3382
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003383 drm_modeset_lock_all(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303384 for_each_intel_crtc(dev, intel_crtc) {
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003385 if (intel_crtc->base.state->active) {
Vandana Kannana54746e2015-03-03 20:53:10 +05303386 active_crtc_cnt++;
3387 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
3388
3389 drrs_status_per_crtc(m, dev, intel_crtc);
3390 }
Vandana Kannana54746e2015-03-03 20:53:10 +05303391 }
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003392 drm_modeset_unlock_all(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303393
3394 if (!active_crtc_cnt)
3395 seq_puts(m, "No active crtc found\n");
3396
3397 return 0;
3398}
3399
Dave Airlie11bed952014-05-12 15:22:27 +10003400static int i915_dp_mst_info(struct seq_file *m, void *unused)
3401{
David Weinehall36cdd012016-08-22 13:59:31 +03003402 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3403 struct drm_device *dev = &dev_priv->drm;
Dave Airlie11bed952014-05-12 15:22:27 +10003404 struct intel_encoder *intel_encoder;
3405 struct intel_digital_port *intel_dig_port;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003406 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003407 struct drm_connector_list_iter conn_iter;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003408
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003409 drm_connector_list_iter_begin(dev, &conn_iter);
3410 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003411 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
Dave Airlie11bed952014-05-12 15:22:27 +10003412 continue;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003413
3414 intel_encoder = intel_attached_encoder(connector);
3415 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3416 continue;
3417
3418 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
Dave Airlie11bed952014-05-12 15:22:27 +10003419 if (!intel_dig_port->dp.can_mst)
3420 continue;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003421
Jim Bride40ae80c2016-04-14 10:18:37 -07003422 seq_printf(m, "MST Source Port %c\n",
Ville Syrjälä8f4f2792017-11-09 17:24:34 +02003423 port_name(intel_dig_port->base.port));
Dave Airlie11bed952014-05-12 15:22:27 +10003424 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3425 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003426 drm_connector_list_iter_end(&conn_iter);
3427
Dave Airlie11bed952014-05-12 15:22:27 +10003428 return 0;
3429}
3430
/*
 * debugfs write handler: parse a decimal value from userspace and arm
 * (value exactly 1) or disarm (anything else) DP compliance-test handling
 * on every connected, non-MST DisplayPort connector.
 */
static ssize_t i915_displayport_test_active_write(struct file *file,
						  const char __user *ubuf,
						  size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	if (len == 0)
		return 0;

	/* Copy and NUL-terminate the user buffer in one step. */
	input_buffer = memdup_user_nul(ubuf, len);
	if (IS_ERR(input_buffer))
		return PTR_ERR(input_buffer);

	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				/* Parse error: stop; status is returned below. */
				break;
			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance.test_active = 1;
			else
				intel_dp->compliance.test_active = 0;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}
3489
3490static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3491{
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003492 struct drm_i915_private *dev_priv = m->private;
3493 struct drm_device *dev = &dev_priv->drm;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003494 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003495 struct drm_connector_list_iter conn_iter;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003496 struct intel_dp *intel_dp;
3497
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003498 drm_connector_list_iter_begin(dev, &conn_iter);
3499 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003500 struct intel_encoder *encoder;
3501
Todd Previteeb3394fa2015-04-18 00:04:19 -07003502 if (connector->connector_type !=
3503 DRM_MODE_CONNECTOR_DisplayPort)
3504 continue;
3505
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003506 encoder = to_intel_encoder(connector->encoder);
3507 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3508 continue;
3509
3510 if (encoder && connector->status == connector_status_connected) {
3511 intel_dp = enc_to_intel_dp(&encoder->base);
Manasi Navarec1617ab2016-12-09 16:22:50 -08003512 if (intel_dp->compliance.test_active)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003513 seq_puts(m, "1");
3514 else
3515 seq_puts(m, "0");
3516 } else
3517 seq_puts(m, "0");
3518 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003519 drm_connector_list_iter_end(&conn_iter);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003520
3521 return 0;
3522}
3523
3524static int i915_displayport_test_active_open(struct inode *inode,
David Weinehall36cdd012016-08-22 13:59:31 +03003525 struct file *file)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003526{
David Weinehall36cdd012016-08-22 13:59:31 +03003527 return single_open(file, i915_displayport_test_active_show,
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003528 inode->i_private);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003529}
3530
3531static const struct file_operations i915_displayport_test_active_fops = {
3532 .owner = THIS_MODULE,
3533 .open = i915_displayport_test_active_open,
3534 .read = seq_read,
3535 .llseek = seq_lseek,
3536 .release = single_release,
3537 .write = i915_displayport_test_active_write
3538};
3539
/*
 * debugfs read: for each connected, non-MST DP connector, dump the data
 * captured for its compliance test - the EDID value for EDID_READ tests,
 * hdisplay/vdisplay/bpc for VIDEO_PATTERN tests; "0" for connectors with
 * no usable encoder.
 */
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			if (intel_dp->compliance.test_type ==
			    DP_TEST_LINK_EDID_READ)
				seq_printf(m, "%lx",
					   intel_dp->compliance.test_data.edid);
			else if (intel_dp->compliance.test_type ==
				 DP_TEST_LINK_VIDEO_PATTERN) {
				seq_printf(m, "hdisplay: %d\n",
					   intel_dp->compliance.test_data.hdisplay);
				seq_printf(m, "vdisplay: %d\n",
					   intel_dp->compliance.test_data.vdisplay);
				seq_printf(m, "bpc: %u\n",
					   intel_dp->compliance.test_data.bpc);
			}
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003583
3584static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3585{
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003586 struct drm_i915_private *dev_priv = m->private;
3587 struct drm_device *dev = &dev_priv->drm;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003588 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003589 struct drm_connector_list_iter conn_iter;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003590 struct intel_dp *intel_dp;
3591
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003592 drm_connector_list_iter_begin(dev, &conn_iter);
3593 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003594 struct intel_encoder *encoder;
3595
Todd Previteeb3394fa2015-04-18 00:04:19 -07003596 if (connector->connector_type !=
3597 DRM_MODE_CONNECTOR_DisplayPort)
3598 continue;
3599
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003600 encoder = to_intel_encoder(connector->encoder);
3601 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3602 continue;
3603
3604 if (encoder && connector->status == connector_status_connected) {
3605 intel_dp = enc_to_intel_dp(&encoder->base);
Manasi Navarec1617ab2016-12-09 16:22:50 -08003606 seq_printf(m, "%02lx", intel_dp->compliance.test_type);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003607 } else
3608 seq_puts(m, "0");
3609 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003610 drm_connector_list_iter_end(&conn_iter);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003611
3612 return 0;
3613}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003614DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003615
Jani Nikulae5315212019-01-16 11:15:23 +02003616static void wm_latency_show(struct seq_file *m, const u16 wm[8])
Ville Syrjälä369a1342014-01-22 14:36:08 +02003617{
David Weinehall36cdd012016-08-22 13:59:31 +03003618 struct drm_i915_private *dev_priv = m->private;
3619 struct drm_device *dev = &dev_priv->drm;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003620 int level;
Ville Syrjäläde38b952015-06-24 22:00:09 +03003621 int num_levels;
3622
David Weinehall36cdd012016-08-22 13:59:31 +03003623 if (IS_CHERRYVIEW(dev_priv))
Ville Syrjäläde38b952015-06-24 22:00:09 +03003624 num_levels = 3;
David Weinehall36cdd012016-08-22 13:59:31 +03003625 else if (IS_VALLEYVIEW(dev_priv))
Ville Syrjäläde38b952015-06-24 22:00:09 +03003626 num_levels = 1;
Ville Syrjälä04548cb2017-04-21 21:14:29 +03003627 else if (IS_G4X(dev_priv))
3628 num_levels = 3;
Ville Syrjäläde38b952015-06-24 22:00:09 +03003629 else
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01003630 num_levels = ilk_wm_max_level(dev_priv) + 1;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003631
3632 drm_modeset_lock_all(dev);
3633
3634 for (level = 0; level < num_levels; level++) {
3635 unsigned int latency = wm[level];
3636
Damien Lespiau97e94b22014-11-04 17:06:50 +00003637 /*
3638 * - WM1+ latency values in 0.5us units
Ville Syrjäläde38b952015-06-24 22:00:09 +03003639 * - latencies are in us on gen9/vlv/chv
Damien Lespiau97e94b22014-11-04 17:06:50 +00003640 */
Ville Syrjälä04548cb2017-04-21 21:14:29 +03003641 if (INTEL_GEN(dev_priv) >= 9 ||
3642 IS_VALLEYVIEW(dev_priv) ||
3643 IS_CHERRYVIEW(dev_priv) ||
3644 IS_G4X(dev_priv))
Damien Lespiau97e94b22014-11-04 17:06:50 +00003645 latency *= 10;
3646 else if (level > 0)
Ville Syrjälä369a1342014-01-22 14:36:08 +02003647 latency *= 5;
3648
3649 seq_printf(m, "WM%d %u (%u.%u usec)\n",
Damien Lespiau97e94b22014-11-04 17:06:50 +00003650 level, wm[level], latency / 10, latency % 10);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003651 }
3652
3653 drm_modeset_unlock_all(dev);
3654}
3655
3656static int pri_wm_latency_show(struct seq_file *m, void *data)
3657{
David Weinehall36cdd012016-08-22 13:59:31 +03003658 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003659 const u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003660
David Weinehall36cdd012016-08-22 13:59:31 +03003661 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003662 latencies = dev_priv->wm.skl_latency;
3663 else
David Weinehall36cdd012016-08-22 13:59:31 +03003664 latencies = dev_priv->wm.pri_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003665
3666 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003667
3668 return 0;
3669}
3670
3671static int spr_wm_latency_show(struct seq_file *m, void *data)
3672{
David Weinehall36cdd012016-08-22 13:59:31 +03003673 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003674 const u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003675
David Weinehall36cdd012016-08-22 13:59:31 +03003676 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003677 latencies = dev_priv->wm.skl_latency;
3678 else
David Weinehall36cdd012016-08-22 13:59:31 +03003679 latencies = dev_priv->wm.spr_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003680
3681 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003682
3683 return 0;
3684}
3685
3686static int cur_wm_latency_show(struct seq_file *m, void *data)
3687{
David Weinehall36cdd012016-08-22 13:59:31 +03003688 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003689 const u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003690
David Weinehall36cdd012016-08-22 13:59:31 +03003691 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003692 latencies = dev_priv->wm.skl_latency;
3693 else
David Weinehall36cdd012016-08-22 13:59:31 +03003694 latencies = dev_priv->wm.cur_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003695
3696 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003697
3698 return 0;
3699}
3700
3701static int pri_wm_latency_open(struct inode *inode, struct file *file)
3702{
David Weinehall36cdd012016-08-22 13:59:31 +03003703 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003704
Ville Syrjälä04548cb2017-04-21 21:14:29 +03003705 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003706 return -ENODEV;
3707
David Weinehall36cdd012016-08-22 13:59:31 +03003708 return single_open(file, pri_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003709}
3710
3711static int spr_wm_latency_open(struct inode *inode, struct file *file)
3712{
David Weinehall36cdd012016-08-22 13:59:31 +03003713 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003714
Rodrigo Vivib2ae3182019-02-04 14:25:38 -08003715 if (HAS_GMCH(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003716 return -ENODEV;
3717
David Weinehall36cdd012016-08-22 13:59:31 +03003718 return single_open(file, spr_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003719}
3720
3721static int cur_wm_latency_open(struct inode *inode, struct file *file)
3722{
David Weinehall36cdd012016-08-22 13:59:31 +03003723 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003724
Rodrigo Vivib2ae3182019-02-04 14:25:38 -08003725 if (HAS_GMCH(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003726 return -ENODEV;
3727
David Weinehall36cdd012016-08-22 13:59:31 +03003728 return single_open(file, cur_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003729}
3730
/*
 * Shared write path for the three wm latency debugfs files: parse up to
 * eight space-separated u16 values from userspace and store them into the
 * caller-selected latency table under the modeset locks.  The write must
 * supply exactly as many values as the platform has watermark levels.
 */
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, u16 wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	u16 new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	/* Platform-dependent number of watermark levels (same logic as show). */
	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	/* Reserve one byte for the terminating NUL. */
	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	/* Require exactly num_levels values - no more, no fewer. */
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
3775
3776
3777static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3778 size_t len, loff_t *offp)
3779{
3780 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003781 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003782 u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003783
David Weinehall36cdd012016-08-22 13:59:31 +03003784 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003785 latencies = dev_priv->wm.skl_latency;
3786 else
David Weinehall36cdd012016-08-22 13:59:31 +03003787 latencies = dev_priv->wm.pri_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003788
3789 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003790}
3791
3792static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3793 size_t len, loff_t *offp)
3794{
3795 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003796 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003797 u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003798
David Weinehall36cdd012016-08-22 13:59:31 +03003799 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003800 latencies = dev_priv->wm.skl_latency;
3801 else
David Weinehall36cdd012016-08-22 13:59:31 +03003802 latencies = dev_priv->wm.spr_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003803
3804 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003805}
3806
3807static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3808 size_t len, loff_t *offp)
3809{
3810 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003811 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003812 u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003813
David Weinehall36cdd012016-08-22 13:59:31 +03003814 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003815 latencies = dev_priv->wm.skl_latency;
3816 else
David Weinehall36cdd012016-08-22 13:59:31 +03003817 latencies = dev_priv->wm.cur_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003818
3819 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003820}
3821
3822static const struct file_operations i915_pri_wm_latency_fops = {
3823 .owner = THIS_MODULE,
3824 .open = pri_wm_latency_open,
3825 .read = seq_read,
3826 .llseek = seq_lseek,
3827 .release = single_release,
3828 .write = pri_wm_latency_write
3829};
3830
3831static const struct file_operations i915_spr_wm_latency_fops = {
3832 .owner = THIS_MODULE,
3833 .open = spr_wm_latency_open,
3834 .read = seq_read,
3835 .llseek = seq_lseek,
3836 .release = single_release,
3837 .write = spr_wm_latency_write
3838};
3839
3840static const struct file_operations i915_cur_wm_latency_fops = {
3841 .owner = THIS_MODULE,
3842 .open = cur_wm_latency_open,
3843 .read = seq_read,
3844 .llseek = seq_lseek,
3845 .release = single_release,
3846 .write = cur_wm_latency_write
3847};
3848
Kees Cook647416f2013-03-10 14:10:06 -07003849static int
3850i915_wedged_get(void *data, u64 *val)
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003851{
David Weinehall36cdd012016-08-22 13:59:31 +03003852 struct drm_i915_private *dev_priv = data;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003853
Chris Wilsond98c52c2016-04-13 17:35:05 +01003854 *val = i915_terminally_wedged(&dev_priv->gpu_error);
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003855
Kees Cook647416f2013-03-10 14:10:06 -07003856 return 0;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003857}
3858
/*
 * Manually wedge engines for testing: wait for any in-flight reset to
 * clear its backoff bit first, then raise an error with the user-supplied
 * engine mask, which triggers the normal error-capture/reset path.
 */
static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/* Flush any previous reset before applying for a new one */
	wait_event(i915->gpu_error.reset_queue,
		   !test_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags));

	i915_handle_error(i915, val, I915_ERROR_CAPTURE,
			  "Manually set wedged engine mask = %llx", val);
	return 0;
}
3872
/* Simple u64 attribute wiring i915_wedged_get/_set to debugfs. */
DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003876
/*
 * Bit flags accepted by the i915_drop_caches debugfs attribute (see
 * i915_drop_caches_set below); DROP_ALL is what userspace reads back
 * so it can discover the supported set.
 */
#define DROP_UNBOUND	BIT(0)
#define DROP_BOUND	BIT(1)
#define DROP_RETIRE	BIT(2)
#define DROP_ACTIVE	BIT(3)
#define DROP_FREED	BIT(4)
#define DROP_SHRINK_ALL	BIT(5)
#define DROP_IDLE	BIT(6)
#define DROP_RESET_ACTIVE	BIT(7)
#define DROP_RESET_SEQNO	BIT(8)
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED	| \
		  DROP_SHRINK_ALL |\
		  DROP_IDLE	| \
		  DROP_RESET_ACTIVE | \
		  DROP_RESET_SEQNO)
Kees Cook647416f2013-03-10 14:10:06 -07003895static int
3896i915_drop_caches_get(void *data, u64 *val)
Chris Wilsondd624af2013-01-15 12:39:35 +00003897{
Kees Cook647416f2013-03-10 14:10:06 -07003898 *val = DROP_ALL;
Chris Wilsondd624af2013-01-15 12:39:35 +00003899
Kees Cook647416f2013-03-10 14:10:06 -07003900 return 0;
Chris Wilsondd624af2013-01-15 12:39:35 +00003901}
3902
/*
 * i915_drop_caches_set - shed GEM caches / force idle on demand
 * @data: struct drm_i915_private backing this debugfs attribute
 * @val: mask of DROP_* flags selecting what to drop
 *
 * Used by tests to bring the driver to a known state: optionally waits
 * for or wedges active engines, retires requests, runs the shrinker,
 * drains idle work and frees pending objects, depending on @val.
 * Returns 0 or a -errno from the interruptible waits/locks.
 */
static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	intel_wakeref_t wakeref;
	int ret = 0;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);
	/* Hold a wakeref for the whole operation; released at out:. */
	wakeref = intel_runtime_pm_get(i915);

	/* If asked to reset active work but the engines refuse to idle,
	 * declare the GPU wedged before proceeding. */
	if (val & DROP_RESET_ACTIVE &&
	    wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT))
		i915_gem_set_wedged(i915);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
		ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
		if (ret)
			goto out;

		if (val & DROP_ACTIVE)
			ret = i915_gem_wait_for_idle(i915,
						     I915_WAIT_INTERRUPTIBLE |
						     I915_WAIT_LOCKED,
						     MAX_SCHEDULE_TIMEOUT);

		if (val & DROP_RETIRE)
			i915_retire_requests(i915);

		mutex_unlock(&i915->drm.struct_mutex);
	}

	/* Kick off a full device reset if the wait above left us wedged. */
	if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(&i915->gpu_error))
		i915_handle_error(i915, ALL_ENGINES, 0, NULL);

	/* Shrinker passes run under the fs_reclaim lockdep context so the
	 * same dependencies are exercised as in real memory pressure. */
	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(i915);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_IDLE) {
		/* Flush retire/idle work until the GT actually goes idle. */
		do {
			if (READ_ONCE(i915->gt.active_requests))
				flush_delayed_work(&i915->gt.retire_work);
			drain_delayed_work(&i915->gt.idle_work);
		} while (READ_ONCE(i915->gt.awake));
	}

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(i915);

out:
	intel_runtime_pm_put(i915, wakeref);

	return ret;
}
3967
/* Hex-formatted u64 attribute wiring i915_drop_caches_get/_set to debugfs. */
DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
Chris Wilsondd624af2013-01-15 12:39:35 +00003971
Kees Cook647416f2013-03-10 14:10:06 -07003972static int
Kees Cook647416f2013-03-10 14:10:06 -07003973i915_cache_sharing_get(void *data, u64 *val)
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07003974{
David Weinehall36cdd012016-08-22 13:59:31 +03003975 struct drm_i915_private *dev_priv = data;
Chris Wilsona0371212019-01-14 14:21:14 +00003976 intel_wakeref_t wakeref;
Chris Wilsond4225a52019-01-14 14:21:23 +00003977 u32 snpcr = 0;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07003978
Lucas De Marchif3ce44a2018-12-12 10:10:44 -08003979 if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
Daniel Vetter004777c2012-08-09 15:07:01 +02003980 return -ENODEV;
3981
Chris Wilsond4225a52019-01-14 14:21:23 +00003982 with_intel_runtime_pm(dev_priv, wakeref)
3983 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07003984
Kees Cook647416f2013-03-10 14:10:06 -07003985 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07003986
Kees Cook647416f2013-03-10 14:10:06 -07003987 return 0;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07003988}
3989
Kees Cook647416f2013-03-10 14:10:06 -07003990static int
3991i915_cache_sharing_set(void *data, u64 val)
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07003992{
David Weinehall36cdd012016-08-22 13:59:31 +03003993 struct drm_i915_private *dev_priv = data;
Chris Wilsona0371212019-01-14 14:21:14 +00003994 intel_wakeref_t wakeref;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07003995
Lucas De Marchif3ce44a2018-12-12 10:10:44 -08003996 if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
Daniel Vetter004777c2012-08-09 15:07:01 +02003997 return -ENODEV;
3998
Kees Cook647416f2013-03-10 14:10:06 -07003999 if (val > 3)
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004000 return -EINVAL;
4001
Kees Cook647416f2013-03-10 14:10:06 -07004002 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
Chris Wilsond4225a52019-01-14 14:21:23 +00004003 with_intel_runtime_pm(dev_priv, wakeref) {
4004 u32 snpcr;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004005
Chris Wilsond4225a52019-01-14 14:21:23 +00004006 /* Update the cache sharing policy here as well */
4007 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4008 snpcr &= ~GEN6_MBC_SNPCR_MASK;
4009 snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
4010 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
4011 }
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004012
Kees Cook647416f2013-03-10 14:10:06 -07004013 return 0;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004014}
4015
/* Simple u64 attribute wiring i915_cache_sharing_get/_set to debugfs. */
DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004019
/*
 * cherryview_sseu_device_status - decode live SSEU power state on CHV
 * @dev_priv: device private (name required by I915_READ())
 * @sseu: accumulator filled with the currently powered slice/subslice/EU
 *        configuration
 *
 * Reads the CHV power-gating signal registers and translates them into
 * slice/subslice masks and EU counts. CHV has a single slice and at most
 * two subslices.
 */
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
#define SS_MAX 2
	const int ss_max = SS_MAX;
	u32 sig1[SS_MAX], sig2[SS_MAX];
	int ss;

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] |= BIT(ss);
		/* Each EU pair that is not power-gated contributes 2 EUs. */
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
#undef SS_MAX
}
4052
/*
 * gen10_sseu_device_status - decode live SSEU power state on gen10+
 * @dev_priv: device private (name required by I915_READ())
 * @sseu: accumulator filled with the currently powered slice/subslice/EU
 *        configuration
 *
 * Reads the per-slice power-gating ACK registers and translates them
 * into slice/subslice masks and EU counts.
 */
static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		/*
		 * FIXME: Valid SS Mask respects the spec and read
		 * only valid bits for those registers, excluding reserved
		 * although this seems wrong because it would leave many
		 * subslices without ACK.
		 */
		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
	}

	/* ACK bits for EU pairs in even (SSA) and odd (SSB) subslices. */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		sseu->subslice_mask[s] = info->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			/* Each set ACK bit represents an enabled EU pair. */
			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4108
/*
 * gen9_sseu_device_status - decode live SSEU power state on gen9
 * @dev_priv: device private (name required by I915_READ())
 * @sseu: accumulator filled with the currently powered slice/subslice/EU
 *        configuration
 *
 * Like gen10_sseu_device_status, but gen9 "LP" (broxton-class) parts
 * derive the subslice mask from the ACK bits while "BC" (big core)
 * parts take it from the static runtime info.
 */
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	/* ACK bits for EU pairs in even (SSA) and odd (SSB) subslices. */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		if (IS_GEN9_BC(dev_priv))
			sseu->subslice_mask[s] =
				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask[s] |= BIT(ss);
			}

			/* Each set ACK bit represents an enabled EU pair. */
			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4164
/*
 * broadwell_sseu_device_status - decode live SSEU power state on BDW
 * @dev_priv: device private (name required by I915_READ())
 * @sseu: accumulator filled with the currently powered slice/subslice/EU
 *        configuration
 *
 * BDW only reports the enabled slice mask; subslice masks and EU counts
 * are taken from the static runtime info, then corrected for slices
 * fused down to 7 EUs.
 */
static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
					 struct sseu_dev_info *sseu)
{
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->eu_per_subslice =
			RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			sseu->subslice_mask[s] =
				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
		}
		sseu->eu_total = sseu->eu_per_subslice *
				 sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu =
				RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}
4192
/*
 * i915_print_sseu_info - dump one SSEU configuration to a seq_file
 * @m: seq_file target
 * @is_available_info: true when printing the static ("Available") config,
 *                     false for the live ("Enabled") hardware state
 * @sseu: configuration to print
 *
 * The power-gating capability lines are only meaningful for the static
 * info and are skipped for the live state.
 */
static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
				 const struct sseu_dev_info *sseu)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const char *type = is_available_info ? "Available" : "Enabled";
	int s;

	seq_printf(m, "  %s Slice Mask: %04x\n", type,
		   sseu->slice_mask);
	seq_printf(m, "  %s Slice Total: %u\n", type,
		   hweight8(sseu->slice_mask));
	seq_printf(m, "  %s Subslice Total: %u\n", type,
		   sseu_subslice_total(sseu));
	for (s = 0; s < fls(sseu->slice_mask); s++) {
		seq_printf(m, "  %s Slice%i subslices: %u\n", type,
			   s, hweight8(sseu->subslice_mask[s]));
	}
	seq_printf(m, "  %s EU Total: %u\n", type,
		   sseu->eu_total);
	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
		   sseu->eu_per_subslice);

	if (!is_available_info)
		return;

	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
	if (HAS_POOLED_EU(dev_priv))
		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);

	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(sseu->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(sseu->has_eu_pg));
}
4229
/*
 * i915_sseu_status - debugfs show function for i915_sseu_status
 *
 * Prints both the static SSEU topology from runtime info and the live
 * power-gated state, dispatched to the platform-specific status reader
 * under a runtime-pm wakeref. Gens before 8 report -ENODEV.
 */
static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct sseu_dev_info sseu;
	intel_wakeref_t wakeref;

	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));
	/* Carry over the topology limits the status readers iterate with. */
	sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
	sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
	sseu.max_eus_per_subslice =
		RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;

	with_intel_runtime_pm(dev_priv, wakeref) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_sseu_device_status(dev_priv, &sseu);
		else if (IS_BROADWELL(dev_priv))
			broadwell_sseu_device_status(dev_priv, &sseu);
		else if (IS_GEN(dev_priv, 9))
			gen9_sseu_device_status(dev_priv, &sseu);
		else if (INTEL_GEN(dev_priv) >= 10)
			gen10_sseu_device_status(dev_priv, &sseu);
	}

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}
4264
Ben Widawsky6d794d42011-04-25 11:25:56 -07004265static int i915_forcewake_open(struct inode *inode, struct file *file)
4266{
Chris Wilsond7a133d2017-09-07 14:44:41 +01004267 struct drm_i915_private *i915 = inode->i_private;
Ben Widawsky6d794d42011-04-25 11:25:56 -07004268
Chris Wilsond7a133d2017-09-07 14:44:41 +01004269 if (INTEL_GEN(i915) < 6)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004270 return 0;
4271
Tvrtko Ursulin6ddbb12e2019-01-17 14:48:31 +00004272 file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915);
Chris Wilsond7a133d2017-09-07 14:44:41 +01004273 intel_uncore_forcewake_user_get(i915);
Ben Widawsky6d794d42011-04-25 11:25:56 -07004274
4275 return 0;
4276}
4277
Ben Widawskyc43b5632012-04-16 14:07:40 -07004278static int i915_forcewake_release(struct inode *inode, struct file *file)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004279{
Chris Wilsond7a133d2017-09-07 14:44:41 +01004280 struct drm_i915_private *i915 = inode->i_private;
Ben Widawsky6d794d42011-04-25 11:25:56 -07004281
Chris Wilsond7a133d2017-09-07 14:44:41 +01004282 if (INTEL_GEN(i915) < 6)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004283 return 0;
4284
Chris Wilsond7a133d2017-09-07 14:44:41 +01004285 intel_uncore_forcewake_user_put(i915);
Tvrtko Ursulin6ddbb12e2019-01-17 14:48:31 +00004286 intel_runtime_pm_put(i915,
4287 (intel_wakeref_t)(uintptr_t)file->private_data);
Ben Widawsky6d794d42011-04-25 11:25:56 -07004288
4289 return 0;
4290}
4291
/* debugfs i915_forcewake_user: holding the file open holds forcewake. */
static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
4297
/*
 * i915_hpd_storm_ctl_show - report HPD storm threshold and detection state
 *
 * Flushes outstanding hotplug work first so the reported state reflects
 * any storm handling still in flight.
 */
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;

	/* Synchronize with everything first in case there's been an HPD
	 * storm, but we haven't finished handling it in the kernel yet
	 */
	synchronize_irq(dev_priv->drm.irq);
	flush_work(&dev_priv->hotplug.dig_port_work);
	flush_work(&dev_priv->hotplug.hotplug_work);

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   yesno(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}
4316
4317static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4318 const char __user *ubuf, size_t len,
4319 loff_t *offp)
4320{
4321 struct seq_file *m = file->private_data;
4322 struct drm_i915_private *dev_priv = m->private;
4323 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4324 unsigned int new_threshold;
4325 int i;
4326 char *newline;
4327 char tmp[16];
4328
4329 if (len >= sizeof(tmp))
4330 return -EINVAL;
4331
4332 if (copy_from_user(tmp, ubuf, len))
4333 return -EFAULT;
4334
4335 tmp[len] = '\0';
4336
4337 /* Strip newline, if any */
4338 newline = strchr(tmp, '\n');
4339 if (newline)
4340 *newline = '\0';
4341
4342 if (strcmp(tmp, "reset") == 0)
4343 new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4344 else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4345 return -EINVAL;
4346
4347 if (new_threshold > 0)
4348 DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4349 new_threshold);
4350 else
4351 DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4352
4353 spin_lock_irq(&dev_priv->irq_lock);
4354 hotplug->hpd_storm_threshold = new_threshold;
4355 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4356 for_each_hpd_pin(i)
4357 hotplug->stats[i].count = 0;
4358 spin_unlock_irq(&dev_priv->irq_lock);
4359
4360 /* Re-enable hpd immediately if we were in an irq storm */
4361 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4362
4363 return len;
4364}
4365
/* seq_file open hook passing the i915 private through to the show fn. */
static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}
4370
/* debugfs file ops for i915_hpd_storm_ctl (read state / write threshold). */
static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
4379
Lyude Paul9a64c652018-11-06 16:30:16 -05004380static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
4381{
4382 struct drm_i915_private *dev_priv = m->private;
4383
4384 seq_printf(m, "Enabled: %s\n",
4385 yesno(dev_priv->hotplug.hpd_short_storm_enabled));
4386
4387 return 0;
4388}
4389
/* seq_file open hook passing the i915 private through to the show fn. */
static int
i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_short_storm_ctl_show,
			   inode->i_private);
}
4396
/*
 * i915_hpd_short_storm_ctl_write - enable/disable short-pulse HPD storm
 * detection
 *
 * Accepts a boolean (kstrtobool syntax) or the literal "reset", which
 * restores the platform default (enabled unless the platform has DP MST).
 * Per-pin storm statistics are cleared under the irq lock when the state
 * changes.
 */
static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
					      const char __user *ubuf,
					      size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	char *newline;
	char tmp[16];
	int i;
	bool new_state;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	/* Reset to the "default" state for this system */
	if (strcmp(tmp, "reset") == 0)
		new_state = !HAS_DP_MST(dev_priv);
	else if (kstrtobool(tmp, &new_state) != 0)
		return -EINVAL;

	DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
		      new_state ? "En" : "Dis");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_short_storm_enabled = new_state;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}
4443
/* debugfs file ops for i915_hpd_short_storm_ctl (read state / write bool). */
static const struct file_operations i915_hpd_short_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_short_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_short_storm_ctl_write,
};
4452
/*
 * i915_drrs_ctl_set - manually enable/disable DRRS from debugfs
 * @data: struct drm_i915_private backing this debugfs attribute
 * @val: non-zero to enable DRRS, zero to disable
 *
 * Walks every CRTC under its modeset lock; for each active CRTC with
 * DRRS support, waits for any pending commit's hw_done and then toggles
 * DRRS on every eDP connector driven by that CRTC. -ENODEV before gen7.
 */
static int i915_drrs_ctl_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 7)
		return -ENODEV;

	for_each_intel_crtc(dev, crtc) {
		struct drm_connector_list_iter conn_iter;
		struct intel_crtc_state *crtc_state;
		struct drm_connector *connector;
		struct drm_crtc_commit *commit;
		int ret;

		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		/* ret == 0 here, so these gotos report success for this crtc */
		if (!crtc_state->base.active ||
		    !crtc_state->has_drrs)
			goto out;

		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				goto out;
		}

		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct intel_encoder *encoder;
			struct intel_dp *intel_dp;

			/* only connectors actually driven by this crtc */
			if (!(crtc_state->base.connector_mask &
			      drm_connector_mask(connector)))
				continue;

			encoder = intel_attached_encoder(connector);
			if (encoder->type != INTEL_OUTPUT_EDP)
				continue;

			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
						val ? "en" : "dis", val);

			intel_dp = enc_to_intel_dp(&encoder->base);
			if (val)
				intel_edp_drrs_enable(intel_dp,
						      crtc_state);
			else
				intel_edp_drrs_disable(intel_dp,
						       crtc_state);
		}
		drm_connector_list_iter_end(&conn_iter);

out:
		drm_modeset_unlock(&crtc->base.mutex);
		if (ret)
			return ret;
	}

	return 0;
}
4520
/* Write-only u64 attribute wiring i915_drrs_ctl_set to debugfs. */
DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4522
/*
 * i915_fifo_underrun_reset_write - re-arm FIFO underrun reporting
 *
 * Writing a truthy value re-enables FIFO underrun interrupt reporting on
 * every active pipe (reporting is normally disabled after the first
 * underrun to avoid an interrupt storm) and resets FBC's underrun state.
 * Each CRTC is handled under its own modeset lock after waiting for any
 * pending commit to complete.
 */
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	/* A falsy write is accepted but does nothing. */
	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->base.active) {
			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
				      pipe_name(intel_crtc->pipe));

			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}

		drm_modeset_unlock(&intel_crtc->base.mutex);

		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}
4576
/* debugfs file ops for i915_fifo_underrun_reset (write-only trigger). */
static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};
4583
/*
 * Read-only debugfs entries registered in bulk via drm_debugfs_create_files().
 * Each entry pairs a file name with its show callback; an optional fourth
 * field (see i915_guc_load_err_log_dump) is passed through as the callback's
 * private data.
 */
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_reset_info", i915_reset_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
/* Number of entries in i915_debugfs_list. */
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
Ben Gamari20172632009-02-17 20:08:50 -05004633
/*
 * Debugfs entries backed by dedicated file_operations (typically writable
 * control knobs). These are registered one by one in i915_debugfs_register().
 */
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};
4661
Chris Wilson1dac8912016-06-24 14:00:17 +01004662int i915_debugfs_register(struct drm_i915_private *dev_priv)
Ben Gamari20172632009-02-17 20:08:50 -05004663{
Chris Wilson91c8a322016-07-05 10:40:23 +01004664 struct drm_minor *minor = dev_priv->drm.primary;
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004665 struct dentry *ent;
Maarten Lankhorst6cc42152018-06-28 09:23:02 +02004666 int i;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004667
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004668 ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4669 minor->debugfs_root, to_i915(minor->dev),
4670 &i915_forcewake_fops);
4671 if (!ent)
4672 return -ENOMEM;
Daniel Vetter6a9c3082011-12-14 13:57:11 +01004673
Daniel Vetter34b96742013-07-04 20:49:44 +02004674 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004675 ent = debugfs_create_file(i915_debugfs_files[i].name,
4676 S_IRUGO | S_IWUSR,
4677 minor->debugfs_root,
4678 to_i915(minor->dev),
Daniel Vetter34b96742013-07-04 20:49:44 +02004679 i915_debugfs_files[i].fops);
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004680 if (!ent)
4681 return -ENOMEM;
Daniel Vetter34b96742013-07-04 20:49:44 +02004682 }
Mika Kuoppala40633212012-12-04 15:12:00 +02004683
Ben Gamari27c202a2009-07-01 22:26:52 -04004684 return drm_debugfs_create_files(i915_debugfs_list,
4685 I915_DEBUGFS_ENTRIES,
Ben Gamari20172632009-02-17 20:08:50 -05004686 minor->debugfs_root, minor);
4687}
4688
/* Describes one contiguous range of DPCD registers to dump. */
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};
4699
/* DPCD register ranges dumped by the per-connector i915_dpcd debugfs file. */
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
4712
4713static int i915_dpcd_show(struct seq_file *m, void *data)
4714{
4715 struct drm_connector *connector = m->private;
4716 struct intel_dp *intel_dp =
4717 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
Jani Nikulae5315212019-01-16 11:15:23 +02004718 u8 buf[16];
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004719 ssize_t err;
4720 int i;
4721
Mika Kuoppala5c1a8872015-05-15 13:09:21 +03004722 if (connector->status != connector_status_connected)
4723 return -ENODEV;
4724
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004725 for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4726 const struct dpcd_block *b = &i915_dpcd_debug[i];
4727 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4728
4729 if (b->edp &&
4730 connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4731 continue;
4732
4733 /* low tech for now */
4734 if (WARN_ON(size > sizeof(buf)))
4735 continue;
4736
4737 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
Chris Wilson65404c82018-10-10 09:17:06 +01004738 if (err < 0)
4739 seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
4740 else
4741 seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
kbuild test robotb3f9d7d2015-04-16 18:34:06 +08004742 }
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004743
4744 return 0;
4745}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02004746DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004747
David Weinehallecbd6782016-08-23 12:23:56 +03004748static int i915_panel_show(struct seq_file *m, void *data)
4749{
4750 struct drm_connector *connector = m->private;
4751 struct intel_dp *intel_dp =
4752 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4753
4754 if (connector->status != connector_status_connected)
4755 return -ENODEV;
4756
4757 seq_printf(m, "Panel power up delay: %d\n",
4758 intel_dp->panel_power_up_delay);
4759 seq_printf(m, "Panel power down delay: %d\n",
4760 intel_dp->panel_power_down_delay);
4761 seq_printf(m, "Backlight on delay: %d\n",
4762 intel_dp->backlight_on_delay);
4763 seq_printf(m, "Backlight off delay: %d\n",
4764 intel_dp->backlight_off_delay);
4765
4766 return 0;
4767}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02004768DEFINE_SHOW_ATTRIBUTE(i915_panel);
David Weinehallecbd6782016-08-23 12:23:56 +03004769
/*
 * Report the HDCP version supported by the sink behind this connector:
 * "HDCP1.4" when intel_hdcp_capable() says so, "None" otherwise.
 */
static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_connector *intel_connector = to_intel_connector(connector);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	/* Without a registered HDCP shim this connector can't do HDCP at all. */
	if (!intel_connector->hdcp.shim)
		return -EINVAL;

	seq_printf(m, "%s:%d HDCP version: ", connector->name,
		   connector->base.id);
	seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
		   "None" : "HDCP1.4");
	seq_puts(m, "\n");

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
4791
/*
 * Report DSC enable state and the sink's DSC/FEC capabilities for the DP
 * connector behind this debugfs file.
 *
 * Takes the connection_mutex and the CRTC's modeset lock; on -EDEADLK the
 * whole acquisition is backed off via drm_modeset_backoff() and retried
 * (the try_again loop). FEC support is only printed for non-eDP sinks.
 */
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret = 0;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		try_again = false;
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			/* Interrupted while waiting for connection_mutex. */
			ret = -EINTR;
			break;
		}
		crtc = connector->state->crtc;
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			/* Contention: drop all held locks and retry. */
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   yesno(crtc_state->dsc_params.compression_enable));
		seq_printf(m, "DSC_Sink_Support: %s\n",
			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
4845
4846static ssize_t i915_dsc_fec_support_write(struct file *file,
4847 const char __user *ubuf,
4848 size_t len, loff_t *offp)
4849{
4850 bool dsc_enable = false;
4851 int ret;
4852 struct drm_connector *connector =
4853 ((struct seq_file *)file->private_data)->private;
4854 struct intel_encoder *encoder = intel_attached_encoder(connector);
4855 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4856
4857 if (len == 0)
4858 return 0;
4859
4860 DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
4861 len);
4862
4863 ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
4864 if (ret < 0)
4865 return ret;
4866
4867 DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
4868 (dsc_enable) ? "true" : "false");
4869 intel_dp->force_dsc_en = dsc_enable;
4870
4871 *offp += len;
4872 return len;
4873}
4874
/* seq_file open handler binding i915_dsc_fec_support_show to the connector. */
static int i915_dsc_fec_support_open(struct inode *inode,
				     struct file *file)
{
	return single_open(file, i915_dsc_fec_support_show,
			   inode->i_private);
}
4881
/* Read/write file operations for the i915_dsc_fec_support debugfs entry. */
static const struct file_operations i915_dsc_fec_support_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_fec_support_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_fec_support_write
};
4890
/**
 * i915_debugfs_connector_add - add i915 specific connector debugfs files
 * @connector: pointer to a registered drm_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 *
 * Returns 0 on success, negative error codes on error.
 */
int i915_debugfs_connector_add(struct drm_connector *connector)
{
	struct dentry *root = connector->debugfs_entry;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);

	/* The connector must have been registered beforehand. */
	if (!root)
		return -ENODEV;

	/* DPCD dump: DP and eDP connectors only. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_dpcd", S_IRUGO, root,
				    connector, &i915_dpcd_fops);

	/* Panel timings and PSR sink status: eDP only. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
				    connector, &i915_panel_fops);
		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
				    connector, &i915_psr_sink_status_fops);
	}

	/* HDCP sink capability: DP and HDMI connectors. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
				    connector, &i915_hdcp_sink_capability_fops);
	}

	/* DSC/FEC support knob: gen10+ hardware, DP/eDP connectors. */
	if (INTEL_GEN(dev_priv) >= 10 &&
	    (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
				    connector, &i915_dsc_fec_support_fops);

	return 0;
}