blob: 544e5e7f011fb7b3b9d9f6475af6431db93f6c85 [file] [log] [blame]
Ben Gamari20172632009-02-17 20:08:50 -05001/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 *
27 */
28
Chris Wilsonf3cd4742009-10-13 22:20:20 +010029#include <linux/debugfs.h>
Chris Wilsone637d2c2017-03-16 13:19:57 +000030#include <linux/sort.h>
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +010031#include <linux/sched/mm.h>
Simon Farnsworth4e5359c2010-09-01 17:47:52 +010032#include "intel_drv.h"
Sagar Arun Kamblea2695742017-11-16 19:02:41 +053033#include "intel_guc_submission.h"
Ben Gamari20172632009-02-17 20:08:50 -050034
/* Map a debugfs info node back to the i915 device that registered it. */
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}
39
/*
 * i915_capabilities - debugfs entry dumping static device capabilities.
 *
 * Prints gen/platform/PCH, then the device-info flags, runtime info and
 * driver caps via a drm_printer, and finally the module parameters.  The
 * modparams dump is done under kernel_param_lock() so a concurrent sysfs
 * write cannot tear the values mid-print.
 */
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

	intel_device_info_dump_flags(info, &p);
	intel_device_info_dump_runtime(info, &p);
	intel_driver_caps_print(&dev_priv->caps, &p);

	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915_modparams, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}
Ben Gamari433e12f2009-02-17 20:08:51 -050060
/* '*' while the object is still in use by the GPU, ' ' otherwise. */
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_is_active(obj))
		return '*';

	return ' ';
}
65
Imre Deaka7363de2016-05-12 16:18:52 +030066static char get_pin_flag(struct drm_i915_gem_object *obj)
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010067{
Chris Wilsonbd3d2252017-10-13 21:26:14 +010068 return obj->pin_global ? 'p' : ' ';
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010069}
70
Imre Deaka7363de2016-05-12 16:18:52 +030071static char get_tiling_flag(struct drm_i915_gem_object *obj)
Chris Wilsona6172a82009-02-11 14:26:38 +000072{
Chris Wilson3e510a82016-08-05 10:14:23 +010073 switch (i915_gem_object_get_tiling(obj)) {
Akshay Joshi0206e352011-08-16 15:34:10 -040074 default:
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010075 case I915_TILING_NONE: return ' ';
76 case I915_TILING_X: return 'X';
77 case I915_TILING_Y: return 'Y';
Akshay Joshi0206e352011-08-16 15:34:10 -040078 }
Chris Wilsona6172a82009-02-11 14:26:38 +000079}
80
Imre Deaka7363de2016-05-12 16:18:52 +030081static char get_global_flag(struct drm_i915_gem_object *obj)
Ben Widawsky1d693bc2013-07-31 17:00:00 -070082{
Chris Wilsona65adaf2017-10-09 09:43:57 +010083 return obj->userfault_count ? 'g' : ' ';
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010084}
85
Imre Deaka7363de2016-05-12 16:18:52 +030086static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010087{
Chris Wilsona4f5ea62016-10-28 13:58:35 +010088 return obj->mm.mapping ? 'M' : ' ';
Ben Widawsky1d693bc2013-07-31 17:00:00 -070089}
90
Tvrtko Ursulinca1543b2015-07-01 11:51:10 +010091static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
92{
93 u64 size = 0;
94 struct i915_vma *vma;
95
Chris Wilsone2189dd2017-12-07 21:14:07 +000096 for_each_ggtt_vma(vma, obj) {
97 if (drm_mm_node_allocated(&vma->node))
Tvrtko Ursulinca1543b2015-07-01 11:51:10 +010098 size += vma->node.size;
99 }
100
101 return size;
102}
103
Matthew Auld7393b7e2017-10-06 23:18:28 +0100104static const char *
105stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
106{
107 size_t x = 0;
108
109 switch (page_sizes) {
110 case 0:
111 return "";
112 case I915_GTT_PAGE_SIZE_4K:
113 return "4K";
114 case I915_GTT_PAGE_SIZE_64K:
115 return "64K";
116 case I915_GTT_PAGE_SIZE_2M:
117 return "2M";
118 default:
119 if (!buf)
120 return "M";
121
122 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
123 x += snprintf(buf + x, len - x, "2M, ");
124 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
125 x += snprintf(buf + x, len - x, "64K, ");
126 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
127 x += snprintf(buf + x, len - x, "4K, ");
128 buf[x-2] = '\0';
129
130 return buf;
131 }
132}
133
/*
 * describe_obj - emit a one-line summary of a GEM object to a seq_file.
 *
 * Prints the flag characters (active/pinned/tiling/userfault/mapped), size,
 * read/write domains, cache level and madv state, then per-vma binding
 * details (offset, size, page sizes, GGTT view, fence), stolen placement,
 * the last engine to write the object, and any frontbuffer bits.
 * Caller must hold struct_mutex (asserted below).
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	/* First pass: count pinned vmas so the total precedes the list. */
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_global)
		seq_printf(m, " (global)");
	/* Second pass: describe each vma that actually has GTT space. */
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
221
Chris Wilsone637d2c2017-03-16 13:19:57 +0000222static int obj_rank_by_stolen(const void *A, const void *B)
Chris Wilson6d2b88852013-08-07 18:30:54 +0100223{
Chris Wilsone637d2c2017-03-16 13:19:57 +0000224 const struct drm_i915_gem_object *a =
225 *(const struct drm_i915_gem_object **)A;
226 const struct drm_i915_gem_object *b =
227 *(const struct drm_i915_gem_object **)B;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100228
Rasmus Villemoes2d05fa12015-09-28 23:08:50 +0200229 if (a->stolen->start < b->stolen->start)
230 return -1;
231 if (a->stolen->start > b->stolen->start)
232 return 1;
233 return 0;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100234}
235
/*
 * i915_gem_stolen_list_info - debugfs dump of objects in stolen memory.
 *
 * Snapshots up to object_count objects from the bound and unbound lists
 * (under the obj_lock spinlock so describe_obj() can sleep later), sorts
 * them by stolen offset and prints each.  GTT size is accumulated for
 * bound objects only.  Allocation happens before taking struct_mutex;
 * the goto-out path frees it on lock interruption.
 */
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	/* Unlocked snapshot of the population; used only to size the array. */
	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);

	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		/* Unbound objects occupy no GTT space, only object size. */
		total_obj_size += obj->base.size;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, " ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}
298
/*
 * Accumulator for per-client (or kernel-wide) GEM object statistics,
 * filled in by per_file_stats().  All sizes are in bytes.
 */
struct file_stats {
	struct drm_i915_file_private *file_priv; /* ppgtt-vma ownership filter */
	unsigned long count;	/* number of objects visited */
	u64 total, unbound;	/* all object bytes / objects with no binding */
	u64 global, shared;	/* GGTT-bound bytes / named or dma-buf exported */
	u64 active, inactive;	/* bound vma bytes split by GPU activity */
};
306
/*
 * per_file_stats - idr_for_each() callback accumulating object statistics.
 * @id: idr handle (unused)
 * @ptr: the GEM object
 * @data: struct file_stats accumulator
 *
 * Adds the object's size to the totals: unbound if it has no bindings,
 * shared if flink-named or dma-buf exported, and per-vma global (GGTT)
 * or active/inactive sizes.  ppgtt vmas belonging to a different client
 * than stats->file_priv are skipped.  Always returns 0 so iteration
 * continues.
 */
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);

			/* Only count ppgtt vmas owned by the tracked client. */
			if (ppgtt->vm.file != stats->file_priv)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;
	}

	return 0;
}
343
/*
 * Print one summary line for a struct file_stats, skipping clients with
 * no objects.  A macro (not a function) so @stats can be passed by value
 * and the seq_printf format stays in one place; @stats is evaluated
 * multiple times, so pass a plain variable, not an expression.
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)
Brad Volkin493018d2014-12-11 12:13:08 -0800356
357static void print_batch_pool_stats(struct seq_file *m,
358 struct drm_i915_private *dev_priv)
359{
360 struct drm_i915_gem_object *obj;
361 struct file_stats stats;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000362 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +0530363 enum intel_engine_id id;
Dave Gordonb4ac5af2016-03-24 11:20:38 +0000364 int j;
Brad Volkin493018d2014-12-11 12:13:08 -0800365
366 memset(&stats, 0, sizeof(stats));
367
Akash Goel3b3f1652016-10-13 22:44:48 +0530368 for_each_engine(engine, dev_priv, id) {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000369 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
Chris Wilson8d9d5742015-04-07 16:20:38 +0100370 list_for_each_entry(obj,
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000371 &engine->batch_pool.cache_list[j],
Chris Wilson8d9d5742015-04-07 16:20:38 +0100372 batch_pool_link)
373 per_file_stats(0, obj, &stats);
374 }
Chris Wilson06fbca72015-04-07 16:20:36 +0100375 }
Brad Volkin493018d2014-12-11 12:13:08 -0800376
Chris Wilsonb0da1b72015-04-07 16:20:40 +0100377 print_file_stats(m, "[k]batch pool", stats);
Brad Volkin493018d2014-12-11 12:13:08 -0800378}
379
/*
 * per_file_ctx_stats - idr_for_each() callback for context objects.
 * @idx: idr handle (unused)
 * @ptr: the i915_gem_context
 * @data: struct file_stats accumulator
 *
 * Folds each engine's context-state and ringbuffer backing objects into
 * the statistics.  Always returns 0 so iteration continues.
 */
static int per_file_ctx_stats(int idx, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, ctx->i915, id) {
		struct intel_context *ce = to_intel_context(ctx, engine);

		if (ce->state)
			per_file_stats(0, ce->state->obj, data);
		if (ce->ring)
			per_file_stats(0, ce->ring->vma->obj, data);
	}

	return 0;
}
397
/*
 * Sum and print the backing-object sizes of every context: the kernel
 * context plus each context of every open drm file.  Takes struct_mutex
 * for the walk; the filelist traversal appears to rely on the caller
 * holding dev->filelist_mutex (i915_gem_object_info does) — NOTE(review):
 * confirm before reusing elsewhere.
 */
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct file_stats stats;
	struct drm_file *file;

	memset(&stats, 0, sizeof(stats));

	mutex_lock(&dev->struct_mutex);
	if (dev_priv->kernel_context)
		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *fpriv = file->driver_priv;
		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
	}
	mutex_unlock(&dev->struct_mutex);

	print_file_stats(m, "[k]contexts", stats);
}
419
/*
 * i915_gem_object_info - debugfs overview of GEM memory usage.
 *
 * Walks the unbound and bound object lists (under obj_lock) totalling
 * size, purgeable, kernel-mapped, huge-page and display-pinned objects,
 * prints GGTT totals and supported page sizes, then batch-pool stats,
 * and finally per-client statistics for every open drm file.
 *
 * Lock ordering: struct_mutex for the object walk is dropped before
 * taking filelist_mutex; inside the filelist walk struct_mutex is
 * re-taken per file (per_file_stats asserts it).
 */
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
	struct drm_i915_gem_object *obj;
	unsigned int page_sizes = 0;
	struct drm_file *file;
	char buf[80];
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	huge_size = huge_count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	/* size/count restart for the bound list; purgeable/mapped carry on. */
	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->pin_global) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
		   huge_count,
		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
		   huge_size);
	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%pa] gtt total\n",
		   ggtt->vm.total, &ggtt->mappable_end);
	seq_printf(m, "Supported page sizes: %s\n",
		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
					buf, sizeof(buf)));

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	mutex_lock(&dev->filelist_mutex);
	print_context_stats(m, dev_priv);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct i915_request *request;
		struct task_struct *task;

		mutex_lock(&dev->struct_mutex);

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		request = list_first_entry_or_null(&file_priv->mm.request_list,
						   struct i915_request,
						   client_link);
		rcu_read_lock();
		/* Prefer the context's pid (the real submitter) over the fd owner. */
		task = pid_task(request && request->gem_context->pid ?
				request->gem_context->pid : file->pid,
				PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();

		mutex_unlock(&dev->struct_mutex);
	}
	mutex_unlock(&dev->filelist_mutex);

	return 0;
}
557
Damien Lespiauaee56cf2013-06-24 22:59:49 +0100558static int i915_gem_gtt_info(struct seq_file *m, void *data)
Chris Wilson08c18322011-01-10 00:00:24 +0000559{
Damien Lespiau9f25d002014-05-13 15:30:28 +0100560 struct drm_info_node *node = m->private;
David Weinehall36cdd012016-08-22 13:59:31 +0300561 struct drm_i915_private *dev_priv = node_to_i915(node);
562 struct drm_device *dev = &dev_priv->drm;
Chris Wilsonf2123812017-10-16 12:40:37 +0100563 struct drm_i915_gem_object **objects;
Chris Wilson08c18322011-01-10 00:00:24 +0000564 struct drm_i915_gem_object *obj;
Mika Kuoppalac44ef602015-06-25 18:35:05 +0300565 u64 total_obj_size, total_gtt_size;
Chris Wilsonf2123812017-10-16 12:40:37 +0100566 unsigned long nobject, n;
Chris Wilson08c18322011-01-10 00:00:24 +0000567 int count, ret;
568
Chris Wilsonf2123812017-10-16 12:40:37 +0100569 nobject = READ_ONCE(dev_priv->mm.object_count);
570 objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
571 if (!objects)
572 return -ENOMEM;
573
Chris Wilson08c18322011-01-10 00:00:24 +0000574 ret = mutex_lock_interruptible(&dev->struct_mutex);
575 if (ret)
576 return ret;
577
Chris Wilsonf2123812017-10-16 12:40:37 +0100578 count = 0;
579 spin_lock(&dev_priv->mm.obj_lock);
580 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
581 objects[count++] = obj;
582 if (count == nobject)
583 break;
584 }
585 spin_unlock(&dev_priv->mm.obj_lock);
586
587 total_obj_size = total_gtt_size = 0;
588 for (n = 0; n < count; n++) {
589 obj = objects[n];
590
Damien Lespiau267f0c92013-06-24 22:59:48 +0100591 seq_puts(m, " ");
Chris Wilson08c18322011-01-10 00:00:24 +0000592 describe_obj(m, obj);
Damien Lespiau267f0c92013-06-24 22:59:48 +0100593 seq_putc(m, '\n');
Chris Wilson08c18322011-01-10 00:00:24 +0000594 total_obj_size += obj->base.size;
Tvrtko Ursulinca1543b2015-07-01 11:51:10 +0100595 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
Chris Wilson08c18322011-01-10 00:00:24 +0000596 }
597
598 mutex_unlock(&dev->struct_mutex);
599
Mika Kuoppalac44ef602015-06-25 18:35:05 +0300600 seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
Chris Wilson08c18322011-01-10 00:00:24 +0000601 count, total_obj_size, total_gtt_size);
Chris Wilsonf2123812017-10-16 12:40:37 +0100602 kvfree(objects);
Chris Wilson08c18322011-01-10 00:00:24 +0000603
604 return 0;
605}
606
/*
 * i915_gem_batch_pool_info - debugfs dump of the per-engine batch pools.
 *
 * For each engine and each batch-pool size bucket, prints the number of
 * cached objects followed by a describe_obj() line per object, then a
 * grand total.  Holds struct_mutex throughout (describe_obj asserts it).
 */
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			/* First walk only counts, so the header precedes the list. */
			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, " ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
651
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200652static void gen8_display_interrupt_info(struct seq_file *m)
653{
654 struct drm_i915_private *dev_priv = node_to_i915(m->private);
655 int pipe;
656
657 for_each_pipe(dev_priv, pipe) {
658 enum intel_display_power_domain power_domain;
659
660 power_domain = POWER_DOMAIN_PIPE(pipe);
661 if (!intel_display_power_get_if_enabled(dev_priv,
662 power_domain)) {
663 seq_printf(m, "Pipe %c power disabled\n",
664 pipe_name(pipe));
665 continue;
666 }
667 seq_printf(m, "Pipe %c IMR:\t%08x\n",
668 pipe_name(pipe),
669 I915_READ(GEN8_DE_PIPE_IMR(pipe)));
670 seq_printf(m, "Pipe %c IIR:\t%08x\n",
671 pipe_name(pipe),
672 I915_READ(GEN8_DE_PIPE_IIR(pipe)));
673 seq_printf(m, "Pipe %c IER:\t%08x\n",
674 pipe_name(pipe),
675 I915_READ(GEN8_DE_PIPE_IER(pipe)));
676
677 intel_display_power_put(dev_priv, power_domain);
678 }
679
680 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
681 I915_READ(GEN8_DE_PORT_IMR));
682 seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
683 I915_READ(GEN8_DE_PORT_IIR));
684 seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
685 I915_READ(GEN8_DE_PORT_IER));
686
687 seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
688 I915_READ(GEN8_DE_MISC_IMR));
689 seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
690 I915_READ(GEN8_DE_MISC_IIR));
691 seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
692 I915_READ(GEN8_DE_MISC_IER));
693
694 seq_printf(m, "PCU interrupt mask:\t%08x\n",
695 I915_READ(GEN8_PCU_IMR));
696 seq_printf(m, "PCU interrupt identity:\t%08x\n",
697 I915_READ(GEN8_PCU_IIR));
698 seq_printf(m, "PCU interrupt enable:\t%08x\n",
699 I915_READ(GEN8_PCU_IER));
700}
701
Ben Gamari20172632009-02-17 20:08:50 -0500702static int i915_interrupt_info(struct seq_file *m, void *data)
703{
David Weinehall36cdd012016-08-22 13:59:31 +0300704 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000705 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +0530706 enum intel_engine_id id;
Chris Wilson4bb05042016-09-03 07:53:43 +0100707 int i, pipe;
Chris Wilsonde227ef2010-07-03 07:58:38 +0100708
Paulo Zanonic8c8fb32013-11-27 18:21:54 -0200709 intel_runtime_pm_get(dev_priv);
Ben Gamari20172632009-02-17 20:08:50 -0500710
David Weinehall36cdd012016-08-22 13:59:31 +0300711 if (IS_CHERRYVIEW(dev_priv)) {
Ville Syrjälä74e1ca82014-04-09 13:28:09 +0300712 seq_printf(m, "Master Interrupt Control:\t%08x\n",
713 I915_READ(GEN8_MASTER_IRQ));
714
715 seq_printf(m, "Display IER:\t%08x\n",
716 I915_READ(VLV_IER));
717 seq_printf(m, "Display IIR:\t%08x\n",
718 I915_READ(VLV_IIR));
719 seq_printf(m, "Display IIR_RW:\t%08x\n",
720 I915_READ(VLV_IIR_RW));
721 seq_printf(m, "Display IMR:\t%08x\n",
722 I915_READ(VLV_IMR));
Chris Wilson9c870d02016-10-24 13:42:15 +0100723 for_each_pipe(dev_priv, pipe) {
724 enum intel_display_power_domain power_domain;
725
726 power_domain = POWER_DOMAIN_PIPE(pipe);
727 if (!intel_display_power_get_if_enabled(dev_priv,
728 power_domain)) {
729 seq_printf(m, "Pipe %c power disabled\n",
730 pipe_name(pipe));
731 continue;
732 }
733
Ville Syrjälä74e1ca82014-04-09 13:28:09 +0300734 seq_printf(m, "Pipe %c stat:\t%08x\n",
735 pipe_name(pipe),
736 I915_READ(PIPESTAT(pipe)));
737
Chris Wilson9c870d02016-10-24 13:42:15 +0100738 intel_display_power_put(dev_priv, power_domain);
739 }
740
741 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
Ville Syrjälä74e1ca82014-04-09 13:28:09 +0300742 seq_printf(m, "Port hotplug:\t%08x\n",
743 I915_READ(PORT_HOTPLUG_EN));
744 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
745 I915_READ(VLV_DPFLIPSTAT));
746 seq_printf(m, "DPINVGTT:\t%08x\n",
747 I915_READ(DPINVGTT));
Chris Wilson9c870d02016-10-24 13:42:15 +0100748 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
Ville Syrjälä74e1ca82014-04-09 13:28:09 +0300749
750 for (i = 0; i < 4; i++) {
751 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
752 i, I915_READ(GEN8_GT_IMR(i)));
753 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
754 i, I915_READ(GEN8_GT_IIR(i)));
755 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
756 i, I915_READ(GEN8_GT_IER(i)));
757 }
758
759 seq_printf(m, "PCU interrupt mask:\t%08x\n",
760 I915_READ(GEN8_PCU_IMR));
761 seq_printf(m, "PCU interrupt identity:\t%08x\n",
762 I915_READ(GEN8_PCU_IIR));
763 seq_printf(m, "PCU interrupt enable:\t%08x\n",
764 I915_READ(GEN8_PCU_IER));
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200765 } else if (INTEL_GEN(dev_priv) >= 11) {
766 seq_printf(m, "Master Interrupt Control: %08x\n",
767 I915_READ(GEN11_GFX_MSTR_IRQ));
768
769 seq_printf(m, "Render/Copy Intr Enable: %08x\n",
770 I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
771 seq_printf(m, "VCS/VECS Intr Enable: %08x\n",
772 I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
773 seq_printf(m, "GUC/SG Intr Enable:\t %08x\n",
774 I915_READ(GEN11_GUC_SG_INTR_ENABLE));
775 seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
776 I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
777 seq_printf(m, "Crypto Intr Enable:\t %08x\n",
778 I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
779 seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n",
780 I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
781
782 seq_printf(m, "Display Interrupt Control:\t%08x\n",
783 I915_READ(GEN11_DISPLAY_INT_CTL));
784
785 gen8_display_interrupt_info(m);
David Weinehall36cdd012016-08-22 13:59:31 +0300786 } else if (INTEL_GEN(dev_priv) >= 8) {
Ben Widawskya123f152013-11-02 21:07:10 -0700787 seq_printf(m, "Master Interrupt Control:\t%08x\n",
788 I915_READ(GEN8_MASTER_IRQ));
789
790 for (i = 0; i < 4; i++) {
791 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
792 i, I915_READ(GEN8_GT_IMR(i)));
793 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
794 i, I915_READ(GEN8_GT_IIR(i)));
795 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
796 i, I915_READ(GEN8_GT_IER(i)));
797 }
798
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200799 gen8_display_interrupt_info(m);
David Weinehall36cdd012016-08-22 13:59:31 +0300800 } else if (IS_VALLEYVIEW(dev_priv)) {
Jesse Barnes7e231dbe2012-03-28 13:39:38 -0700801 seq_printf(m, "Display IER:\t%08x\n",
802 I915_READ(VLV_IER));
803 seq_printf(m, "Display IIR:\t%08x\n",
804 I915_READ(VLV_IIR));
805 seq_printf(m, "Display IIR_RW:\t%08x\n",
806 I915_READ(VLV_IIR_RW));
807 seq_printf(m, "Display IMR:\t%08x\n",
808 I915_READ(VLV_IMR));
Chris Wilson4f4631a2017-02-10 13:36:32 +0000809 for_each_pipe(dev_priv, pipe) {
810 enum intel_display_power_domain power_domain;
811
812 power_domain = POWER_DOMAIN_PIPE(pipe);
813 if (!intel_display_power_get_if_enabled(dev_priv,
814 power_domain)) {
815 seq_printf(m, "Pipe %c power disabled\n",
816 pipe_name(pipe));
817 continue;
818 }
819
Jesse Barnes7e231dbe2012-03-28 13:39:38 -0700820 seq_printf(m, "Pipe %c stat:\t%08x\n",
821 pipe_name(pipe),
822 I915_READ(PIPESTAT(pipe)));
Chris Wilson4f4631a2017-02-10 13:36:32 +0000823 intel_display_power_put(dev_priv, power_domain);
824 }
Jesse Barnes7e231dbe2012-03-28 13:39:38 -0700825
826 seq_printf(m, "Master IER:\t%08x\n",
827 I915_READ(VLV_MASTER_IER));
828
829 seq_printf(m, "Render IER:\t%08x\n",
830 I915_READ(GTIER));
831 seq_printf(m, "Render IIR:\t%08x\n",
832 I915_READ(GTIIR));
833 seq_printf(m, "Render IMR:\t%08x\n",
834 I915_READ(GTIMR));
835
836 seq_printf(m, "PM IER:\t\t%08x\n",
837 I915_READ(GEN6_PMIER));
838 seq_printf(m, "PM IIR:\t\t%08x\n",
839 I915_READ(GEN6_PMIIR));
840 seq_printf(m, "PM IMR:\t\t%08x\n",
841 I915_READ(GEN6_PMIMR));
842
843 seq_printf(m, "Port hotplug:\t%08x\n",
844 I915_READ(PORT_HOTPLUG_EN));
845 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
846 I915_READ(VLV_DPFLIPSTAT));
847 seq_printf(m, "DPINVGTT:\t%08x\n",
848 I915_READ(DPINVGTT));
849
David Weinehall36cdd012016-08-22 13:59:31 +0300850 } else if (!HAS_PCH_SPLIT(dev_priv)) {
Zhenyu Wang5f6a1692009-08-10 21:37:24 +0800851 seq_printf(m, "Interrupt enable: %08x\n",
852 I915_READ(IER));
853 seq_printf(m, "Interrupt identity: %08x\n",
854 I915_READ(IIR));
855 seq_printf(m, "Interrupt mask: %08x\n",
856 I915_READ(IMR));
Damien Lespiau055e3932014-08-18 13:49:10 +0100857 for_each_pipe(dev_priv, pipe)
Jesse Barnes9db4a9c2011-02-07 12:26:52 -0800858 seq_printf(m, "Pipe %c stat: %08x\n",
859 pipe_name(pipe),
860 I915_READ(PIPESTAT(pipe)));
Zhenyu Wang5f6a1692009-08-10 21:37:24 +0800861 } else {
862 seq_printf(m, "North Display Interrupt enable: %08x\n",
863 I915_READ(DEIER));
864 seq_printf(m, "North Display Interrupt identity: %08x\n",
865 I915_READ(DEIIR));
866 seq_printf(m, "North Display Interrupt mask: %08x\n",
867 I915_READ(DEIMR));
868 seq_printf(m, "South Display Interrupt enable: %08x\n",
869 I915_READ(SDEIER));
870 seq_printf(m, "South Display Interrupt identity: %08x\n",
871 I915_READ(SDEIIR));
872 seq_printf(m, "South Display Interrupt mask: %08x\n",
873 I915_READ(SDEIMR));
874 seq_printf(m, "Graphics Interrupt enable: %08x\n",
875 I915_READ(GTIER));
876 seq_printf(m, "Graphics Interrupt identity: %08x\n",
877 I915_READ(GTIIR));
878 seq_printf(m, "Graphics Interrupt mask: %08x\n",
879 I915_READ(GTIMR));
880 }
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200881
882 if (INTEL_GEN(dev_priv) >= 11) {
883 seq_printf(m, "RCS Intr Mask:\t %08x\n",
884 I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
885 seq_printf(m, "BCS Intr Mask:\t %08x\n",
886 I915_READ(GEN11_BCS_RSVD_INTR_MASK));
887 seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
888 I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
889 seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
890 I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
891 seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
892 I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
893 seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
894 I915_READ(GEN11_GUC_SG_INTR_MASK));
895 seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
896 I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
897 seq_printf(m, "Crypto Intr Mask:\t %08x\n",
898 I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
899 seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
900 I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
901
902 } else if (INTEL_GEN(dev_priv) >= 6) {
Chris Wilsond5acadf2017-12-09 10:44:18 +0000903 for_each_engine(engine, dev_priv, id) {
Chris Wilsona2c7f6f2012-09-01 20:51:22 +0100904 seq_printf(m,
905 "Graphics Interrupt mask (%s): %08x\n",
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000906 engine->name, I915_READ_IMR(engine));
Chris Wilson9862e602011-01-04 22:22:17 +0000907 }
Chris Wilson9862e602011-01-04 22:22:17 +0000908 }
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200909
Paulo Zanonic8c8fb32013-11-27 18:21:54 -0200910 intel_runtime_pm_put(dev_priv);
Chris Wilsonde227ef2010-07-03 07:58:38 +0100911
Ben Gamari20172632009-02-17 20:08:50 -0500912 return 0;
913}
914
Chris Wilsona6172a82009-02-11 14:26:38 +0000915static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
916{
David Weinehall36cdd012016-08-22 13:59:31 +0300917 struct drm_i915_private *dev_priv = node_to_i915(m->private);
918 struct drm_device *dev = &dev_priv->drm;
Chris Wilsonde227ef2010-07-03 07:58:38 +0100919 int i, ret;
920
921 ret = mutex_lock_interruptible(&dev->struct_mutex);
922 if (ret)
923 return ret;
Chris Wilsona6172a82009-02-11 14:26:38 +0000924
Chris Wilsona6172a82009-02-11 14:26:38 +0000925 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
926 for (i = 0; i < dev_priv->num_fence_regs; i++) {
Chris Wilson49ef5292016-08-18 17:17:00 +0100927 struct i915_vma *vma = dev_priv->fence_regs[i].vma;
Chris Wilsona6172a82009-02-11 14:26:38 +0000928
Chris Wilson6c085a72012-08-20 11:40:46 +0200929 seq_printf(m, "Fence %d, pin count = %d, object = ",
930 i, dev_priv->fence_regs[i].pin_count);
Chris Wilson49ef5292016-08-18 17:17:00 +0100931 if (!vma)
Damien Lespiau267f0c92013-06-24 22:59:48 +0100932 seq_puts(m, "unused");
Chris Wilsonc2c347a92010-10-27 15:11:53 +0100933 else
Chris Wilson49ef5292016-08-18 17:17:00 +0100934 describe_obj(m, vma->obj);
Damien Lespiau267f0c92013-06-24 22:59:48 +0100935 seq_putc(m, '\n');
Chris Wilsona6172a82009-02-11 14:26:38 +0000936 }
937
Chris Wilson05394f32010-11-08 19:18:58 +0000938 mutex_unlock(&dev->struct_mutex);
Chris Wilsona6172a82009-02-11 14:26:38 +0000939 return 0;
940}
941
Chris Wilson98a2f412016-10-12 10:05:18 +0100942#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000943static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
944 size_t count, loff_t *pos)
945{
946 struct i915_gpu_state *error = file->private_data;
947 struct drm_i915_error_state_buf str;
948 ssize_t ret;
949 loff_t tmp;
950
951 if (!error)
952 return 0;
953
954 ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
955 if (ret)
956 return ret;
957
958 ret = i915_error_state_to_str(&str, error);
959 if (ret)
960 goto out;
961
962 tmp = 0;
963 ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
964 if (ret < 0)
965 goto out;
966
967 *pos = str.start + ret;
968out:
969 i915_error_state_buf_release(&str);
970 return ret;
971}
972
973static int gpu_state_release(struct inode *inode, struct file *file)
974{
975 i915_gpu_state_put(file->private_data);
976 return 0;
977}
978
979static int i915_gpu_info_open(struct inode *inode, struct file *file)
980{
Chris Wilson090e5fe2017-03-28 14:14:07 +0100981 struct drm_i915_private *i915 = inode->i_private;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000982 struct i915_gpu_state *gpu;
983
Chris Wilson090e5fe2017-03-28 14:14:07 +0100984 intel_runtime_pm_get(i915);
985 gpu = i915_capture_gpu_state(i915);
986 intel_runtime_pm_put(i915);
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000987 if (!gpu)
988 return -ENOMEM;
989
990 file->private_data = gpu;
991 return 0;
992}
993
994static const struct file_operations i915_gpu_info_fops = {
995 .owner = THIS_MODULE,
996 .open = i915_gpu_info_open,
997 .read = gpu_state_read,
998 .llseek = default_llseek,
999 .release = gpu_state_release,
1000};
Chris Wilson98a2f412016-10-12 10:05:18 +01001001
Daniel Vetterd5442302012-04-27 15:17:40 +02001002static ssize_t
1003i915_error_state_write(struct file *filp,
1004 const char __user *ubuf,
1005 size_t cnt,
1006 loff_t *ppos)
1007{
Chris Wilson5a4c6f12017-02-14 16:46:11 +00001008 struct i915_gpu_state *error = filp->private_data;
1009
1010 if (!error)
1011 return 0;
Daniel Vetterd5442302012-04-27 15:17:40 +02001012
1013 DRM_DEBUG_DRIVER("Resetting error state\n");
Chris Wilson5a4c6f12017-02-14 16:46:11 +00001014 i915_reset_error_state(error->i915);
Daniel Vetterd5442302012-04-27 15:17:40 +02001015
1016 return cnt;
1017}
1018
1019static int i915_error_state_open(struct inode *inode, struct file *file)
1020{
Chris Wilson5a4c6f12017-02-14 16:46:11 +00001021 file->private_data = i915_first_error_state(inode->i_private);
Mika Kuoppalaedc3d882013-05-23 13:55:35 +03001022 return 0;
Daniel Vetterd5442302012-04-27 15:17:40 +02001023}
1024
Daniel Vetterd5442302012-04-27 15:17:40 +02001025static const struct file_operations i915_error_state_fops = {
1026 .owner = THIS_MODULE,
1027 .open = i915_error_state_open,
Chris Wilson5a4c6f12017-02-14 16:46:11 +00001028 .read = gpu_state_read,
Daniel Vetterd5442302012-04-27 15:17:40 +02001029 .write = i915_error_state_write,
1030 .llseek = default_llseek,
Chris Wilson5a4c6f12017-02-14 16:46:11 +00001031 .release = gpu_state_release,
Daniel Vetterd5442302012-04-27 15:17:40 +02001032};
Chris Wilson98a2f412016-10-12 10:05:18 +01001033#endif
1034
Kees Cook647416f2013-03-10 14:10:06 -07001035static int
Kees Cook647416f2013-03-10 14:10:06 -07001036i915_next_seqno_set(void *data, u64 val)
Mika Kuoppala40633212012-12-04 15:12:00 +02001037{
David Weinehall36cdd012016-08-22 13:59:31 +03001038 struct drm_i915_private *dev_priv = data;
1039 struct drm_device *dev = &dev_priv->drm;
Mika Kuoppala40633212012-12-04 15:12:00 +02001040 int ret;
1041
Mika Kuoppala40633212012-12-04 15:12:00 +02001042 ret = mutex_lock_interruptible(&dev->struct_mutex);
1043 if (ret)
1044 return ret;
1045
Chris Wilson65c475c2018-01-02 15:12:31 +00001046 intel_runtime_pm_get(dev_priv);
Chris Wilson73cb9702016-10-28 13:58:46 +01001047 ret = i915_gem_set_global_seqno(dev, val);
Chris Wilson65c475c2018-01-02 15:12:31 +00001048 intel_runtime_pm_put(dev_priv);
1049
Mika Kuoppala40633212012-12-04 15:12:00 +02001050 mutex_unlock(&dev->struct_mutex);
1051
Kees Cook647416f2013-03-10 14:10:06 -07001052 return ret;
Mika Kuoppala40633212012-12-04 15:12:00 +02001053}
1054
Kees Cook647416f2013-03-10 14:10:06 -07001055DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
Chris Wilson9b6586a2017-02-23 07:44:08 +00001056 NULL, i915_next_seqno_set,
Mika Kuoppala3a3b4f92013-04-12 12:10:05 +03001057 "0x%llx\n");
Mika Kuoppala40633212012-12-04 15:12:00 +02001058
Deepak Sadb4bd12014-03-31 11:30:02 +05301059static int i915_frequency_info(struct seq_file *m, void *unused)
Jesse Barnesf97108d2010-01-29 11:27:07 -08001060{
David Weinehall36cdd012016-08-22 13:59:31 +03001061 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001062 struct intel_rps *rps = &dev_priv->gt_pm.rps;
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02001063 int ret = 0;
1064
1065 intel_runtime_pm_get(dev_priv);
Jesse Barnesf97108d2010-01-29 11:27:07 -08001066
David Weinehall36cdd012016-08-22 13:59:31 +03001067 if (IS_GEN5(dev_priv)) {
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001068 u16 rgvswctl = I915_READ16(MEMSWCTL);
1069 u16 rgvstat = I915_READ16(MEMSTAT_ILK);
1070
1071 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
1072 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
1073 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
1074 MEMSTAT_VID_SHIFT);
1075 seq_printf(m, "Current P-state: %d\n",
1076 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
David Weinehall36cdd012016-08-22 13:59:31 +03001077 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
Sagar Arun Kamble0d6fc922017-10-10 22:30:02 +01001078 u32 rpmodectl, freq_sts;
Wayne Boyer666a4532015-12-09 12:29:35 -08001079
Sagar Arun Kamble9f817502017-10-10 22:30:05 +01001080 mutex_lock(&dev_priv->pcu_lock);
Sagar Arun Kamble0d6fc922017-10-10 22:30:02 +01001081
1082 rpmodectl = I915_READ(GEN6_RP_CONTROL);
1083 seq_printf(m, "Video Turbo Mode: %s\n",
1084 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1085 seq_printf(m, "HW control enabled: %s\n",
1086 yesno(rpmodectl & GEN6_RP_ENABLE));
1087 seq_printf(m, "SW control enabled: %s\n",
1088 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1089 GEN6_RP_MEDIA_SW_MODE));
1090
Wayne Boyer666a4532015-12-09 12:29:35 -08001091 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
1092 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
1093 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
1094
1095 seq_printf(m, "actual GPU freq: %d MHz\n",
1096 intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
1097
1098 seq_printf(m, "current GPU freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001099 intel_gpu_freq(dev_priv, rps->cur_freq));
Wayne Boyer666a4532015-12-09 12:29:35 -08001100
1101 seq_printf(m, "max GPU freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001102 intel_gpu_freq(dev_priv, rps->max_freq));
Wayne Boyer666a4532015-12-09 12:29:35 -08001103
1104 seq_printf(m, "min GPU freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001105 intel_gpu_freq(dev_priv, rps->min_freq));
Wayne Boyer666a4532015-12-09 12:29:35 -08001106
1107 seq_printf(m, "idle GPU freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001108 intel_gpu_freq(dev_priv, rps->idle_freq));
Wayne Boyer666a4532015-12-09 12:29:35 -08001109
1110 seq_printf(m,
1111 "efficient (RPe) frequency: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001112 intel_gpu_freq(dev_priv, rps->efficient_freq));
Sagar Arun Kamble9f817502017-10-10 22:30:05 +01001113 mutex_unlock(&dev_priv->pcu_lock);
David Weinehall36cdd012016-08-22 13:59:31 +03001114 } else if (INTEL_GEN(dev_priv) >= 6) {
Bob Paauwe35040562015-06-25 14:54:07 -07001115 u32 rp_state_limits;
1116 u32 gt_perf_status;
1117 u32 rp_state_cap;
Chris Wilson0d8f9492014-03-27 09:06:14 +00001118 u32 rpmodectl, rpinclimit, rpdeclimit;
Chris Wilson8e8c06c2013-08-26 19:51:01 -03001119 u32 rpstat, cagf, reqf;
Jesse Barnesccab5c82011-01-18 15:49:25 -08001120 u32 rpupei, rpcurup, rpprevup;
1121 u32 rpdownei, rpcurdown, rpprevdown;
Paulo Zanoni9dd3c602014-08-01 18:14:48 -03001122 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001123 int max_freq;
1124
Bob Paauwe35040562015-06-25 14:54:07 -07001125 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02001126 if (IS_GEN9_LP(dev_priv)) {
Bob Paauwe35040562015-06-25 14:54:07 -07001127 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
1128 gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
1129 } else {
1130 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
1131 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
1132 }
1133
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001134 /* RPSTAT1 is in the GT power well */
Mika Kuoppala59bad942015-01-16 11:34:40 +02001135 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001136
Chris Wilson8e8c06c2013-08-26 19:51:01 -03001137 reqf = I915_READ(GEN6_RPNSWREQ);
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001138 if (INTEL_GEN(dev_priv) >= 9)
Akash Goel60260a52015-03-06 11:07:21 +05301139 reqf >>= 23;
1140 else {
1141 reqf &= ~GEN6_TURBO_DISABLE;
David Weinehall36cdd012016-08-22 13:59:31 +03001142 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
Akash Goel60260a52015-03-06 11:07:21 +05301143 reqf >>= 24;
1144 else
1145 reqf >>= 25;
1146 }
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001147 reqf = intel_gpu_freq(dev_priv, reqf);
Chris Wilson8e8c06c2013-08-26 19:51:01 -03001148
Chris Wilson0d8f9492014-03-27 09:06:14 +00001149 rpmodectl = I915_READ(GEN6_RP_CONTROL);
1150 rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
1151 rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
1152
Jesse Barnesccab5c82011-01-18 15:49:25 -08001153 rpstat = I915_READ(GEN6_RPSTAT1);
Akash Goeld6cda9c2016-04-23 00:05:46 +05301154 rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
1155 rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
1156 rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
1157 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
1158 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
1159 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
Tvrtko Ursulinc84b2702017-11-21 18:18:44 +00001160 cagf = intel_gpu_freq(dev_priv,
1161 intel_get_cagf(dev_priv, rpstat));
Jesse Barnesccab5c82011-01-18 15:49:25 -08001162
Mika Kuoppala59bad942015-01-16 11:34:40 +02001163 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
Ben Widawskyd1ebd8162011-04-25 20:11:50 +01001164
Oscar Mateo6b7a6a72018-05-10 14:59:55 -07001165 if (INTEL_GEN(dev_priv) >= 11) {
1166 pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
1167 pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
1168 /*
1169 * The equivalent to the PM ISR & IIR cannot be read
1170 * without affecting the current state of the system
1171 */
1172 pm_isr = 0;
1173 pm_iir = 0;
1174 } else if (INTEL_GEN(dev_priv) >= 8) {
Paulo Zanoni9dd3c602014-08-01 18:14:48 -03001175 pm_ier = I915_READ(GEN8_GT_IER(2));
1176 pm_imr = I915_READ(GEN8_GT_IMR(2));
1177 pm_isr = I915_READ(GEN8_GT_ISR(2));
1178 pm_iir = I915_READ(GEN8_GT_IIR(2));
Oscar Mateo6b7a6a72018-05-10 14:59:55 -07001179 } else {
1180 pm_ier = I915_READ(GEN6_PMIER);
1181 pm_imr = I915_READ(GEN6_PMIMR);
1182 pm_isr = I915_READ(GEN6_PMISR);
1183 pm_iir = I915_READ(GEN6_PMIIR);
Paulo Zanoni9dd3c602014-08-01 18:14:48 -03001184 }
Oscar Mateo6b7a6a72018-05-10 14:59:55 -07001185 pm_mask = I915_READ(GEN6_PMINTRMSK);
1186
Sagar Arun Kamble960e5462017-10-10 22:29:59 +01001187 seq_printf(m, "Video Turbo Mode: %s\n",
1188 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1189 seq_printf(m, "HW control enabled: %s\n",
1190 yesno(rpmodectl & GEN6_RP_ENABLE));
1191 seq_printf(m, "SW control enabled: %s\n",
1192 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1193 GEN6_RP_MEDIA_SW_MODE));
Oscar Mateo6b7a6a72018-05-10 14:59:55 -07001194
1195 seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
1196 pm_ier, pm_imr, pm_mask);
1197 if (INTEL_GEN(dev_priv) <= 10)
1198 seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
1199 pm_isr, pm_iir);
Sagar Arun Kamble5dd04552017-03-11 08:07:00 +05301200 seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001201 rps->pm_intrmsk_mbz);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001202 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001203 seq_printf(m, "Render p-state ratio: %d\n",
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001204 (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001205 seq_printf(m, "Render p-state VID: %d\n",
1206 gt_perf_status & 0xff);
1207 seq_printf(m, "Render p-state limit: %d\n",
1208 rp_state_limits & 0xff);
Chris Wilson0d8f9492014-03-27 09:06:14 +00001209 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
1210 seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
1211 seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
1212 seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
Chris Wilson8e8c06c2013-08-26 19:51:01 -03001213 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
Ben Widawskyf82855d2013-01-29 12:00:15 -08001214 seq_printf(m, "CAGF: %dMHz\n", cagf);
Akash Goeld6cda9c2016-04-23 00:05:46 +05301215 seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
1216 rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
1217 seq_printf(m, "RP CUR UP: %d (%dus)\n",
1218 rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
1219 seq_printf(m, "RP PREV UP: %d (%dus)\n",
1220 rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001221 seq_printf(m, "Up threshold: %d%%\n", rps->up_threshold);
Chris Wilsond86ed342015-04-27 13:41:19 +01001222
Akash Goeld6cda9c2016-04-23 00:05:46 +05301223 seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
1224 rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
1225 seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
1226 rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
1227 seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
1228 rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001229 seq_printf(m, "Down threshold: %d%%\n", rps->down_threshold);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001230
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02001231 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
Bob Paauwe35040562015-06-25 14:54:07 -07001232 rp_state_cap >> 16) & 0xff;
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001233 max_freq *= (IS_GEN9_BC(dev_priv) ||
Oscar Mateo2b2874e2018-04-05 17:00:52 +03001234 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001235 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001236 intel_gpu_freq(dev_priv, max_freq));
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001237
1238 max_freq = (rp_state_cap & 0xff00) >> 8;
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001239 max_freq *= (IS_GEN9_BC(dev_priv) ||
Oscar Mateo2b2874e2018-04-05 17:00:52 +03001240 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001241 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001242 intel_gpu_freq(dev_priv, max_freq));
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001243
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02001244 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
Bob Paauwe35040562015-06-25 14:54:07 -07001245 rp_state_cap >> 0) & 0xff;
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001246 max_freq *= (IS_GEN9_BC(dev_priv) ||
Oscar Mateo2b2874e2018-04-05 17:00:52 +03001247 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001248 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001249 intel_gpu_freq(dev_priv, max_freq));
Ben Widawsky31c77382013-04-05 14:29:22 -07001250 seq_printf(m, "Max overclocked frequency: %dMHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001251 intel_gpu_freq(dev_priv, rps->max_freq));
Chris Wilsonaed242f2015-03-18 09:48:21 +00001252
Chris Wilsond86ed342015-04-27 13:41:19 +01001253 seq_printf(m, "Current freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001254 intel_gpu_freq(dev_priv, rps->cur_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001255 seq_printf(m, "Actual freq: %d MHz\n", cagf);
Chris Wilsonaed242f2015-03-18 09:48:21 +00001256 seq_printf(m, "Idle freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001257 intel_gpu_freq(dev_priv, rps->idle_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001258 seq_printf(m, "Min freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001259 intel_gpu_freq(dev_priv, rps->min_freq));
Chris Wilson29ecd78d2016-07-13 09:10:35 +01001260 seq_printf(m, "Boost freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001261 intel_gpu_freq(dev_priv, rps->boost_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001262 seq_printf(m, "Max freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001263 intel_gpu_freq(dev_priv, rps->max_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001264 seq_printf(m,
1265 "efficient (RPe) frequency: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001266 intel_gpu_freq(dev_priv, rps->efficient_freq));
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001267 } else {
Damien Lespiau267f0c92013-06-24 22:59:48 +01001268 seq_puts(m, "no P-state info available\n");
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001269 }
Jesse Barnesf97108d2010-01-29 11:27:07 -08001270
Ville Syrjälä49cd97a2017-02-07 20:33:45 +02001271 seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
Mika Kahola1170f282015-09-25 14:00:32 +03001272 seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
1273 seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
1274
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02001275 intel_runtime_pm_put(dev_priv);
1276 return ret;
Jesse Barnesf97108d2010-01-29 11:27:07 -08001277}
1278
Ben Widawskyd6369512016-09-20 16:54:32 +03001279static void i915_instdone_info(struct drm_i915_private *dev_priv,
1280 struct seq_file *m,
1281 struct intel_instdone *instdone)
1282{
Ben Widawskyf9e61372016-09-20 16:54:33 +03001283 int slice;
1284 int subslice;
1285
Ben Widawskyd6369512016-09-20 16:54:32 +03001286 seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1287 instdone->instdone);
1288
1289 if (INTEL_GEN(dev_priv) <= 3)
1290 return;
1291
1292 seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1293 instdone->slice_common);
1294
1295 if (INTEL_GEN(dev_priv) <= 6)
1296 return;
1297
Ben Widawskyf9e61372016-09-20 16:54:33 +03001298 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1299 seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1300 slice, subslice, instdone->sampler[slice][subslice]);
1301
1302 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1303 seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1304 slice, subslice, instdone->row[slice][subslice]);
Ben Widawskyd6369512016-09-20 16:54:32 +03001305}
1306
Chris Wilsonf6544492015-01-26 18:03:04 +02001307static int i915_hangcheck_info(struct seq_file *m, void *unused)
1308{
David Weinehall36cdd012016-08-22 13:59:31 +03001309 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001310 struct intel_engine_cs *engine;
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00001311 u64 acthd[I915_NUM_ENGINES];
1312 u32 seqno[I915_NUM_ENGINES];
Ben Widawskyd6369512016-09-20 16:54:32 +03001313 struct intel_instdone instdone;
Dave Gordonc3232b12016-03-23 18:19:53 +00001314 enum intel_engine_id id;
Chris Wilsonf6544492015-01-26 18:03:04 +02001315
Chris Wilson8af29b02016-09-09 14:11:47 +01001316 if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
Chris Wilson8c185ec2017-03-16 17:13:02 +00001317 seq_puts(m, "Wedged\n");
1318 if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
1319 seq_puts(m, "Reset in progress: struct_mutex backoff\n");
1320 if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
1321 seq_puts(m, "Reset in progress: reset handoff to waiter\n");
Chris Wilson8af29b02016-09-09 14:11:47 +01001322 if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
Chris Wilson8c185ec2017-03-16 17:13:02 +00001323 seq_puts(m, "Waiter holding struct mutex\n");
Chris Wilson8af29b02016-09-09 14:11:47 +01001324 if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
Chris Wilson8c185ec2017-03-16 17:13:02 +00001325 seq_puts(m, "struct_mutex blocked for reset\n");
Chris Wilson8af29b02016-09-09 14:11:47 +01001326
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00001327 if (!i915_modparams.enable_hangcheck) {
Chris Wilson8c185ec2017-03-16 17:13:02 +00001328 seq_puts(m, "Hangcheck disabled\n");
Chris Wilsonf6544492015-01-26 18:03:04 +02001329 return 0;
1330 }
1331
Mika Kuoppalaebbc7542015-02-05 18:41:48 +02001332 intel_runtime_pm_get(dev_priv);
1333
Akash Goel3b3f1652016-10-13 22:44:48 +05301334 for_each_engine(engine, dev_priv, id) {
Chris Wilson7e37f882016-08-02 22:50:21 +01001335 acthd[id] = intel_engine_get_active_head(engine);
Chris Wilson1b7744e2016-07-01 17:23:17 +01001336 seqno[id] = intel_engine_get_seqno(engine);
Mika Kuoppalaebbc7542015-02-05 18:41:48 +02001337 }
1338
Akash Goel3b3f1652016-10-13 22:44:48 +05301339 intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
Mika Kuoppala61642ff2015-12-01 17:56:12 +02001340
Mika Kuoppalaebbc7542015-02-05 18:41:48 +02001341 intel_runtime_pm_put(dev_priv);
1342
Chris Wilson8352aea2017-03-03 09:00:56 +00001343 if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
1344 seq_printf(m, "Hangcheck active, timer fires in %dms\n",
Chris Wilsonf6544492015-01-26 18:03:04 +02001345 jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
1346 jiffies));
Chris Wilson8352aea2017-03-03 09:00:56 +00001347 else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
1348 seq_puts(m, "Hangcheck active, work pending\n");
1349 else
1350 seq_puts(m, "Hangcheck inactive\n");
Chris Wilsonf6544492015-01-26 18:03:04 +02001351
Chris Wilsonf73b5672017-03-02 15:03:56 +00001352 seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));
1353
Akash Goel3b3f1652016-10-13 22:44:48 +05301354 for_each_engine(engine, dev_priv, id) {
Chris Wilson33f53712016-10-04 21:11:32 +01001355 struct intel_breadcrumbs *b = &engine->breadcrumbs;
1356 struct rb_node *rb;
1357
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001358 seq_printf(m, "%s:\n", engine->name);
Chris Wilson52d7f162018-04-30 14:15:00 +01001359 seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
Chris Wilsoncb399ea2016-11-01 10:03:16 +00001360 engine->hangcheck.seqno, seqno[id],
Chris Wilson52d7f162018-04-30 14:15:00 +01001361 intel_engine_last_submit(engine));
Chris Wilson1fd00c0f2018-06-02 11:48:53 +01001362 seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s, wedged? %s\n",
Chris Wilson83348ba2016-08-09 17:47:51 +01001363 yesno(intel_engine_has_waiter(engine)),
1364 yesno(test_bit(engine->id,
Mika Kuoppala3fe3b032016-11-18 15:09:04 +02001365 &dev_priv->gpu_error.missed_irq_rings)),
Chris Wilson1fd00c0f2018-06-02 11:48:53 +01001366 yesno(engine->hangcheck.stalled),
1367 yesno(engine->hangcheck.wedged));
Mika Kuoppala3fe3b032016-11-18 15:09:04 +02001368
Chris Wilson61d3dc72017-03-03 19:08:24 +00001369 spin_lock_irq(&b->rb_lock);
Chris Wilson33f53712016-10-04 21:11:32 +01001370 for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
Geliang Tangf802cf72016-12-19 22:43:49 +08001371 struct intel_wait *w = rb_entry(rb, typeof(*w), node);
Chris Wilson33f53712016-10-04 21:11:32 +01001372
1373 seq_printf(m, "\t%s [%d] waiting for %x\n",
1374 w->tsk->comm, w->tsk->pid, w->seqno);
1375 }
Chris Wilson61d3dc72017-03-03 19:08:24 +00001376 spin_unlock_irq(&b->rb_lock);
Chris Wilson33f53712016-10-04 21:11:32 +01001377
Chris Wilsonf6544492015-01-26 18:03:04 +02001378 seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001379 (long long)engine->hangcheck.acthd,
Dave Gordonc3232b12016-03-23 18:19:53 +00001380 (long long)acthd[id]);
Mika Kuoppala3fe3b032016-11-18 15:09:04 +02001381 seq_printf(m, "\taction = %s(%d) %d ms ago\n",
1382 hangcheck_action_to_str(engine->hangcheck.action),
1383 engine->hangcheck.action,
1384 jiffies_to_msecs(jiffies -
1385 engine->hangcheck.action_timestamp));
Mika Kuoppala61642ff2015-12-01 17:56:12 +02001386
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001387 if (engine->id == RCS) {
Ben Widawskyd6369512016-09-20 16:54:32 +03001388 seq_puts(m, "\tinstdone read =\n");
Mika Kuoppala61642ff2015-12-01 17:56:12 +02001389
Ben Widawskyd6369512016-09-20 16:54:32 +03001390 i915_instdone_info(dev_priv, m, &instdone);
Mika Kuoppala61642ff2015-12-01 17:56:12 +02001391
Ben Widawskyd6369512016-09-20 16:54:32 +03001392 seq_puts(m, "\tinstdone accu =\n");
Mika Kuoppala61642ff2015-12-01 17:56:12 +02001393
Ben Widawskyd6369512016-09-20 16:54:32 +03001394 i915_instdone_info(dev_priv, m,
1395 &engine->hangcheck.instdone);
Mika Kuoppala61642ff2015-12-01 17:56:12 +02001396 }
Chris Wilsonf6544492015-01-26 18:03:04 +02001397 }
1398
1399 return 0;
1400}
1401
Michel Thierry061d06a2017-06-20 10:57:49 +01001402static int i915_reset_info(struct seq_file *m, void *unused)
1403{
1404 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1405 struct i915_gpu_error *error = &dev_priv->gpu_error;
1406 struct intel_engine_cs *engine;
1407 enum intel_engine_id id;
1408
1409 seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1410
1411 for_each_engine(engine, dev_priv, id) {
1412 seq_printf(m, "%s = %u\n", engine->name,
1413 i915_reset_engine_count(error, engine));
1414 }
1415
1416 return 0;
1417}
1418
Ben Widawsky4d855292011-12-12 19:34:16 -08001419static int ironlake_drpc_info(struct seq_file *m)
Jesse Barnesf97108d2010-01-29 11:27:07 -08001420{
David Weinehall36cdd012016-08-22 13:59:31 +03001421 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Ben Widawsky616fdb52011-10-05 11:44:54 -07001422 u32 rgvmodectl, rstdbyctl;
1423 u16 crstandvid;
Ben Widawsky616fdb52011-10-05 11:44:54 -07001424
Ben Widawsky616fdb52011-10-05 11:44:54 -07001425 rgvmodectl = I915_READ(MEMMODECTL);
1426 rstdbyctl = I915_READ(RSTDBYCTL);
1427 crstandvid = I915_READ16(CRSTANDVID);
1428
Jani Nikula742f4912015-09-03 11:16:09 +03001429 seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001430 seq_printf(m, "Boost freq: %d\n",
1431 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1432 MEMMODE_BOOST_FREQ_SHIFT);
1433 seq_printf(m, "HW control enabled: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001434 yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001435 seq_printf(m, "SW control enabled: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001436 yesno(rgvmodectl & MEMMODE_SWMODE_EN));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001437 seq_printf(m, "Gated voltage change: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001438 yesno(rgvmodectl & MEMMODE_RCLK_GATE));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001439 seq_printf(m, "Starting frequency: P%d\n",
1440 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
Jesse Barnes7648fa92010-05-20 14:28:11 -07001441 seq_printf(m, "Max P-state: P%d\n",
Jesse Barnesf97108d2010-01-29 11:27:07 -08001442 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
Jesse Barnes7648fa92010-05-20 14:28:11 -07001443 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1444 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1445 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1446 seq_printf(m, "Render standby enabled: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001447 yesno(!(rstdbyctl & RCX_SW_EXIT)));
Damien Lespiau267f0c92013-06-24 22:59:48 +01001448 seq_puts(m, "Current RS state: ");
Jesse Barnes88271da2011-01-05 12:01:24 -08001449 switch (rstdbyctl & RSX_STATUS_MASK) {
1450 case RSX_STATUS_ON:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001451 seq_puts(m, "on\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001452 break;
1453 case RSX_STATUS_RC1:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001454 seq_puts(m, "RC1\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001455 break;
1456 case RSX_STATUS_RC1E:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001457 seq_puts(m, "RC1E\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001458 break;
1459 case RSX_STATUS_RS1:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001460 seq_puts(m, "RS1\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001461 break;
1462 case RSX_STATUS_RS2:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001463 seq_puts(m, "RS2 (RC6)\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001464 break;
1465 case RSX_STATUS_RS3:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001466 seq_puts(m, "RC3 (RC6+)\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001467 break;
1468 default:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001469 seq_puts(m, "unknown\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001470 break;
1471 }
Jesse Barnesf97108d2010-01-29 11:27:07 -08001472
1473 return 0;
1474}
1475
Mika Kuoppalaf65367b2015-01-16 11:34:42 +02001476static int i915_forcewake_domains(struct seq_file *m, void *data)
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001477{
Chris Wilson233ebf52017-03-23 10:19:44 +00001478 struct drm_i915_private *i915 = node_to_i915(m->private);
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001479 struct intel_uncore_forcewake_domain *fw_domain;
Chris Wilsond2dc94b2017-03-23 10:19:41 +00001480 unsigned int tmp;
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001481
Chris Wilsond7a133d2017-09-07 14:44:41 +01001482 seq_printf(m, "user.bypass_count = %u\n",
1483 i915->uncore.user_forcewake.count);
1484
Chris Wilson233ebf52017-03-23 10:19:44 +00001485 for_each_fw_domain(fw_domain, i915, tmp)
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001486 seq_printf(m, "%s.wake_count = %u\n",
Tvrtko Ursulin33c582c2016-04-07 17:04:33 +01001487 intel_uncore_forcewake_domain_to_str(fw_domain->id),
Chris Wilson233ebf52017-03-23 10:19:44 +00001488 READ_ONCE(fw_domain->wake_count));
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001489
1490 return 0;
1491}
1492
Mika Kuoppala13628772017-03-15 17:43:02 +02001493static void print_rc6_res(struct seq_file *m,
1494 const char *title,
1495 const i915_reg_t reg)
1496{
1497 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1498
1499 seq_printf(m, "%s %u (%llu us)\n",
1500 title, I915_READ(reg),
1501 intel_rc6_residency_us(dev_priv, reg));
1502}
1503
Deepak S669ab5a2014-01-10 15:18:26 +05301504static int vlv_drpc_info(struct seq_file *m)
1505{
David Weinehall36cdd012016-08-22 13:59:31 +03001506 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Sagar Arun Kamble0d6fc922017-10-10 22:30:02 +01001507 u32 rcctl1, pw_status;
Deepak S669ab5a2014-01-10 15:18:26 +05301508
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001509 pw_status = I915_READ(VLV_GTLC_PW_STATUS);
Deepak S669ab5a2014-01-10 15:18:26 +05301510 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1511
Deepak S669ab5a2014-01-10 15:18:26 +05301512 seq_printf(m, "RC6 Enabled: %s\n",
1513 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1514 GEN6_RC_CTL_EI_MODE(1))));
1515 seq_printf(m, "Render Power Well: %s\n",
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001516 (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
Deepak S669ab5a2014-01-10 15:18:26 +05301517 seq_printf(m, "Media Power Well: %s\n",
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001518 (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
Deepak S669ab5a2014-01-10 15:18:26 +05301519
Mika Kuoppala13628772017-03-15 17:43:02 +02001520 print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1521 print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
Imre Deak9cc19be2014-04-14 20:24:24 +03001522
Mika Kuoppalaf65367b2015-01-16 11:34:42 +02001523 return i915_forcewake_domains(m, NULL);
Deepak S669ab5a2014-01-10 15:18:26 +05301524}
1525
Ben Widawsky4d855292011-12-12 19:34:16 -08001526static int gen6_drpc_info(struct seq_file *m)
1527{
David Weinehall36cdd012016-08-22 13:59:31 +03001528 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Sagar Arun Kamble960e5462017-10-10 22:29:59 +01001529 u32 gt_core_status, rcctl1, rc6vids = 0;
Akash Goelf2dd7572016-06-27 20:10:01 +05301530 u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
Ben Widawsky4d855292011-12-12 19:34:16 -08001531
Ville Syrjälä75aa3f62015-10-22 15:34:56 +03001532 gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
Chris Wilsoned71f1b2013-07-19 20:36:56 +01001533 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
Ben Widawsky4d855292011-12-12 19:34:16 -08001534
Ben Widawsky4d855292011-12-12 19:34:16 -08001535 rcctl1 = I915_READ(GEN6_RC_CONTROL);
David Weinehall36cdd012016-08-22 13:59:31 +03001536 if (INTEL_GEN(dev_priv) >= 9) {
Akash Goelf2dd7572016-06-27 20:10:01 +05301537 gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
1538 gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
1539 }
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001540
Imre Deak51cc9ad2018-02-08 19:41:02 +02001541 if (INTEL_GEN(dev_priv) <= 7) {
1542 mutex_lock(&dev_priv->pcu_lock);
1543 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
1544 &rc6vids);
1545 mutex_unlock(&dev_priv->pcu_lock);
1546 }
Ben Widawsky4d855292011-12-12 19:34:16 -08001547
Eric Anholtfff24e22012-01-23 16:14:05 -08001548 seq_printf(m, "RC1e Enabled: %s\n",
Ben Widawsky4d855292011-12-12 19:34:16 -08001549 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1550 seq_printf(m, "RC6 Enabled: %s\n",
1551 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
David Weinehall36cdd012016-08-22 13:59:31 +03001552 if (INTEL_GEN(dev_priv) >= 9) {
Akash Goelf2dd7572016-06-27 20:10:01 +05301553 seq_printf(m, "Render Well Gating Enabled: %s\n",
1554 yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
1555 seq_printf(m, "Media Well Gating Enabled: %s\n",
1556 yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
1557 }
Ben Widawsky4d855292011-12-12 19:34:16 -08001558 seq_printf(m, "Deep RC6 Enabled: %s\n",
1559 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1560 seq_printf(m, "Deepest RC6 Enabled: %s\n",
1561 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
Damien Lespiau267f0c92013-06-24 22:59:48 +01001562 seq_puts(m, "Current RC state: ");
Ben Widawsky4d855292011-12-12 19:34:16 -08001563 switch (gt_core_status & GEN6_RCn_MASK) {
1564 case GEN6_RC0:
1565 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
Damien Lespiau267f0c92013-06-24 22:59:48 +01001566 seq_puts(m, "Core Power Down\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001567 else
Damien Lespiau267f0c92013-06-24 22:59:48 +01001568 seq_puts(m, "on\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001569 break;
1570 case GEN6_RC3:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001571 seq_puts(m, "RC3\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001572 break;
1573 case GEN6_RC6:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001574 seq_puts(m, "RC6\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001575 break;
1576 case GEN6_RC7:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001577 seq_puts(m, "RC7\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001578 break;
1579 default:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001580 seq_puts(m, "Unknown\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001581 break;
1582 }
1583
1584 seq_printf(m, "Core Power Down: %s\n",
1585 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
David Weinehall36cdd012016-08-22 13:59:31 +03001586 if (INTEL_GEN(dev_priv) >= 9) {
Akash Goelf2dd7572016-06-27 20:10:01 +05301587 seq_printf(m, "Render Power Well: %s\n",
1588 (gen9_powergate_status &
1589 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
1590 seq_printf(m, "Media Power Well: %s\n",
1591 (gen9_powergate_status &
1592 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
1593 }
Ben Widawskycce66a22012-03-27 18:59:38 -07001594
1595 /* Not exactly sure what this is */
Mika Kuoppala13628772017-03-15 17:43:02 +02001596 print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
1597 GEN6_GT_GFX_RC6_LOCKED);
1598 print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
1599 print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
1600 print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
Ben Widawskycce66a22012-03-27 18:59:38 -07001601
Imre Deak51cc9ad2018-02-08 19:41:02 +02001602 if (INTEL_GEN(dev_priv) <= 7) {
1603 seq_printf(m, "RC6 voltage: %dmV\n",
1604 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1605 seq_printf(m, "RC6+ voltage: %dmV\n",
1606 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1607 seq_printf(m, "RC6++ voltage: %dmV\n",
1608 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1609 }
1610
Akash Goelf2dd7572016-06-27 20:10:01 +05301611 return i915_forcewake_domains(m, NULL);
Ben Widawsky4d855292011-12-12 19:34:16 -08001612}
1613
1614static int i915_drpc_info(struct seq_file *m, void *unused)
1615{
David Weinehall36cdd012016-08-22 13:59:31 +03001616 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001617 int err;
1618
1619 intel_runtime_pm_get(dev_priv);
Ben Widawsky4d855292011-12-12 19:34:16 -08001620
David Weinehall36cdd012016-08-22 13:59:31 +03001621 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001622 err = vlv_drpc_info(m);
David Weinehall36cdd012016-08-22 13:59:31 +03001623 else if (INTEL_GEN(dev_priv) >= 6)
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001624 err = gen6_drpc_info(m);
Ben Widawsky4d855292011-12-12 19:34:16 -08001625 else
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001626 err = ironlake_drpc_info(m);
1627
1628 intel_runtime_pm_put(dev_priv);
1629
1630 return err;
Ben Widawsky4d855292011-12-12 19:34:16 -08001631}
1632
Daniel Vetter9a851782015-06-18 10:30:22 +02001633static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1634{
David Weinehall36cdd012016-08-22 13:59:31 +03001635 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Daniel Vetter9a851782015-06-18 10:30:22 +02001636
1637 seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1638 dev_priv->fb_tracking.busy_bits);
1639
1640 seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1641 dev_priv->fb_tracking.flip_bits);
1642
1643 return 0;
1644}
1645
Jesse Barnesb5e50c32010-02-05 12:42:41 -08001646static int i915_fbc_status(struct seq_file *m, void *unused)
1647{
David Weinehall36cdd012016-08-22 13:59:31 +03001648 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilson31388722017-12-20 20:58:48 +00001649 struct intel_fbc *fbc = &dev_priv->fbc;
Jesse Barnesb5e50c32010-02-05 12:42:41 -08001650
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00001651 if (!HAS_FBC(dev_priv))
1652 return -ENODEV;
Jesse Barnesb5e50c32010-02-05 12:42:41 -08001653
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001654 intel_runtime_pm_get(dev_priv);
Chris Wilson31388722017-12-20 20:58:48 +00001655 mutex_lock(&fbc->lock);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001656
Paulo Zanoni0e631ad2015-10-14 17:45:36 -03001657 if (intel_fbc_is_active(dev_priv))
Damien Lespiau267f0c92013-06-24 22:59:48 +01001658 seq_puts(m, "FBC enabled\n");
Paulo Zanoni2e8144a2015-06-12 14:36:20 -03001659 else
Chris Wilson31388722017-12-20 20:58:48 +00001660 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1661
Ville Syrjälä3fd5d1e2017-06-06 15:43:18 +03001662 if (intel_fbc_is_active(dev_priv)) {
1663 u32 mask;
1664
1665 if (INTEL_GEN(dev_priv) >= 8)
1666 mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1667 else if (INTEL_GEN(dev_priv) >= 7)
1668 mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1669 else if (INTEL_GEN(dev_priv) >= 5)
1670 mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1671 else if (IS_G4X(dev_priv))
1672 mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1673 else
1674 mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1675 FBC_STAT_COMPRESSED);
1676
1677 seq_printf(m, "Compressing: %s\n", yesno(mask));
Paulo Zanoni0fc6a9d2016-10-21 13:55:46 -02001678 }
Paulo Zanoni31b9df12015-06-12 14:36:18 -03001679
Chris Wilson31388722017-12-20 20:58:48 +00001680 mutex_unlock(&fbc->lock);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001681 intel_runtime_pm_put(dev_priv);
1682
Jesse Barnesb5e50c32010-02-05 12:42:41 -08001683 return 0;
1684}
1685
Ville Syrjälä4127dc42017-06-06 15:44:12 +03001686static int i915_fbc_false_color_get(void *data, u64 *val)
Rodrigo Vivida46f932014-08-01 02:04:45 -07001687{
David Weinehall36cdd012016-08-22 13:59:31 +03001688 struct drm_i915_private *dev_priv = data;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001689
David Weinehall36cdd012016-08-22 13:59:31 +03001690 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
Rodrigo Vivida46f932014-08-01 02:04:45 -07001691 return -ENODEV;
1692
Rodrigo Vivida46f932014-08-01 02:04:45 -07001693 *val = dev_priv->fbc.false_color;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001694
1695 return 0;
1696}
1697
Ville Syrjälä4127dc42017-06-06 15:44:12 +03001698static int i915_fbc_false_color_set(void *data, u64 val)
Rodrigo Vivida46f932014-08-01 02:04:45 -07001699{
David Weinehall36cdd012016-08-22 13:59:31 +03001700 struct drm_i915_private *dev_priv = data;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001701 u32 reg;
1702
David Weinehall36cdd012016-08-22 13:59:31 +03001703 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
Rodrigo Vivida46f932014-08-01 02:04:45 -07001704 return -ENODEV;
1705
Paulo Zanoni25ad93f2015-07-02 19:25:10 -03001706 mutex_lock(&dev_priv->fbc.lock);
Rodrigo Vivida46f932014-08-01 02:04:45 -07001707
1708 reg = I915_READ(ILK_DPFC_CONTROL);
1709 dev_priv->fbc.false_color = val;
1710
1711 I915_WRITE(ILK_DPFC_CONTROL, val ?
1712 (reg | FBC_CTL_FALSE_COLOR) :
1713 (reg & ~FBC_CTL_FALSE_COLOR));
1714
Paulo Zanoni25ad93f2015-07-02 19:25:10 -03001715 mutex_unlock(&dev_priv->fbc.lock);
Rodrigo Vivida46f932014-08-01 02:04:45 -07001716 return 0;
1717}
1718
Ville Syrjälä4127dc42017-06-06 15:44:12 +03001719DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
1720 i915_fbc_false_color_get, i915_fbc_false_color_set,
Rodrigo Vivida46f932014-08-01 02:04:45 -07001721 "%llu\n");
1722
Paulo Zanoni92d44622013-05-31 16:33:24 -03001723static int i915_ips_status(struct seq_file *m, void *unused)
1724{
David Weinehall36cdd012016-08-22 13:59:31 +03001725 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Paulo Zanoni92d44622013-05-31 16:33:24 -03001726
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00001727 if (!HAS_IPS(dev_priv))
1728 return -ENODEV;
Paulo Zanoni92d44622013-05-31 16:33:24 -03001729
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001730 intel_runtime_pm_get(dev_priv);
1731
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001732 seq_printf(m, "Enabled by kernel parameter: %s\n",
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00001733 yesno(i915_modparams.enable_ips));
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001734
David Weinehall36cdd012016-08-22 13:59:31 +03001735 if (INTEL_GEN(dev_priv) >= 8) {
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001736 seq_puts(m, "Currently: unknown\n");
1737 } else {
1738 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1739 seq_puts(m, "Currently: enabled\n");
1740 else
1741 seq_puts(m, "Currently: disabled\n");
1742 }
Paulo Zanoni92d44622013-05-31 16:33:24 -03001743
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001744 intel_runtime_pm_put(dev_priv);
1745
Paulo Zanoni92d44622013-05-31 16:33:24 -03001746 return 0;
1747}
1748
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001749static int i915_sr_status(struct seq_file *m, void *unused)
1750{
David Weinehall36cdd012016-08-22 13:59:31 +03001751 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001752 bool sr_enabled = false;
1753
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001754 intel_runtime_pm_get(dev_priv);
Chris Wilson9c870d02016-10-24 13:42:15 +01001755 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001756
Chris Wilson7342a722017-03-09 14:20:49 +00001757 if (INTEL_GEN(dev_priv) >= 9)
1758 /* no global SR status; inspect per-plane WM */;
1759 else if (HAS_PCH_SPLIT(dev_priv))
Chris Wilson5ba2aaa2010-08-19 18:04:08 +01001760 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
Jani Nikulac0f86832016-12-07 12:13:04 +02001761 else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
David Weinehall36cdd012016-08-22 13:59:31 +03001762 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001763 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
David Weinehall36cdd012016-08-22 13:59:31 +03001764 else if (IS_I915GM(dev_priv))
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001765 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
David Weinehall36cdd012016-08-22 13:59:31 +03001766 else if (IS_PINEVIEW(dev_priv))
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001767 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
David Weinehall36cdd012016-08-22 13:59:31 +03001768 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
Ander Conselvan de Oliveira77b64552015-06-02 14:17:47 +03001769 sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001770
Chris Wilson9c870d02016-10-24 13:42:15 +01001771 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001772 intel_runtime_pm_put(dev_priv);
1773
Tvrtko Ursulin08c4d7f2016-11-17 12:30:14 +00001774 seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001775
1776 return 0;
1777}
1778
Jesse Barnes7648fa92010-05-20 14:28:11 -07001779static int i915_emon_status(struct seq_file *m, void *unused)
1780{
David Weinehall36cdd012016-08-22 13:59:31 +03001781 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1782 struct drm_device *dev = &dev_priv->drm;
Jesse Barnes7648fa92010-05-20 14:28:11 -07001783 unsigned long temp, chipset, gfx;
Chris Wilsonde227ef2010-07-03 07:58:38 +01001784 int ret;
1785
David Weinehall36cdd012016-08-22 13:59:31 +03001786 if (!IS_GEN5(dev_priv))
Chris Wilson582be6b2012-04-30 19:35:02 +01001787 return -ENODEV;
1788
Chris Wilsonde227ef2010-07-03 07:58:38 +01001789 ret = mutex_lock_interruptible(&dev->struct_mutex);
1790 if (ret)
1791 return ret;
Jesse Barnes7648fa92010-05-20 14:28:11 -07001792
1793 temp = i915_mch_val(dev_priv);
1794 chipset = i915_chipset_val(dev_priv);
1795 gfx = i915_gfx_val(dev_priv);
Chris Wilsonde227ef2010-07-03 07:58:38 +01001796 mutex_unlock(&dev->struct_mutex);
Jesse Barnes7648fa92010-05-20 14:28:11 -07001797
1798 seq_printf(m, "GMCH temp: %ld\n", temp);
1799 seq_printf(m, "Chipset power: %ld\n", chipset);
1800 seq_printf(m, "GFX power: %ld\n", gfx);
1801 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1802
1803 return 0;
1804}
1805
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001806static int i915_ring_freq_table(struct seq_file *m, void *unused)
1807{
David Weinehall36cdd012016-08-22 13:59:31 +03001808 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001809 struct intel_rps *rps = &dev_priv->gt_pm.rps;
Akash Goelf936ec32015-06-29 14:50:22 +05301810 unsigned int max_gpu_freq, min_gpu_freq;
Chris Wilsond586b5f2018-03-08 14:26:48 +00001811 int gpu_freq, ia_freq;
1812 int ret;
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001813
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00001814 if (!HAS_LLC(dev_priv))
1815 return -ENODEV;
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001816
Paulo Zanoni5bfa0192013-12-19 11:54:52 -02001817 intel_runtime_pm_get(dev_priv);
1818
Sagar Arun Kamble9f817502017-10-10 22:30:05 +01001819 ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001820 if (ret)
Paulo Zanoni5bfa0192013-12-19 11:54:52 -02001821 goto out;
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001822
Chris Wilsond586b5f2018-03-08 14:26:48 +00001823 min_gpu_freq = rps->min_freq;
1824 max_gpu_freq = rps->max_freq;
Oscar Mateo2b2874e2018-04-05 17:00:52 +03001825 if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
Akash Goelf936ec32015-06-29 14:50:22 +05301826 /* Convert GT frequency to 50 HZ units */
Chris Wilsond586b5f2018-03-08 14:26:48 +00001827 min_gpu_freq /= GEN9_FREQ_SCALER;
1828 max_gpu_freq /= GEN9_FREQ_SCALER;
Akash Goelf936ec32015-06-29 14:50:22 +05301829 }
1830
Damien Lespiau267f0c92013-06-24 22:59:48 +01001831 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001832
Akash Goelf936ec32015-06-29 14:50:22 +05301833 for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
Ben Widawsky42c05262012-09-26 10:34:00 -07001834 ia_freq = gpu_freq;
1835 sandybridge_pcode_read(dev_priv,
1836 GEN6_PCODE_READ_MIN_FREQ_TABLE,
1837 &ia_freq);
Chris Wilson3ebecd02013-04-12 19:10:13 +01001838 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
Akash Goelf936ec32015-06-29 14:50:22 +05301839 intel_gpu_freq(dev_priv, (gpu_freq *
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001840 (IS_GEN9_BC(dev_priv) ||
Oscar Mateo2b2874e2018-04-05 17:00:52 +03001841 INTEL_GEN(dev_priv) >= 10 ?
Rodrigo Vivib976dc52017-01-23 10:32:37 -08001842 GEN9_FREQ_SCALER : 1))),
Chris Wilson3ebecd02013-04-12 19:10:13 +01001843 ((ia_freq >> 0) & 0xff) * 100,
1844 ((ia_freq >> 8) & 0xff) * 100);
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001845 }
1846
Sagar Arun Kamble9f817502017-10-10 22:30:05 +01001847 mutex_unlock(&dev_priv->pcu_lock);
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001848
Paulo Zanoni5bfa0192013-12-19 11:54:52 -02001849out:
1850 intel_runtime_pm_put(dev_priv);
1851 return ret;
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001852}
1853
Chris Wilson44834a62010-08-19 16:09:23 +01001854static int i915_opregion(struct seq_file *m, void *unused)
1855{
David Weinehall36cdd012016-08-22 13:59:31 +03001856 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1857 struct drm_device *dev = &dev_priv->drm;
Chris Wilson44834a62010-08-19 16:09:23 +01001858 struct intel_opregion *opregion = &dev_priv->opregion;
1859 int ret;
1860
1861 ret = mutex_lock_interruptible(&dev->struct_mutex);
1862 if (ret)
Daniel Vetter0d38f002012-04-21 22:49:10 +02001863 goto out;
Chris Wilson44834a62010-08-19 16:09:23 +01001864
Jani Nikula2455a8e2015-12-14 12:50:53 +02001865 if (opregion->header)
1866 seq_write(m, opregion->header, OPREGION_SIZE);
Chris Wilson44834a62010-08-19 16:09:23 +01001867
1868 mutex_unlock(&dev->struct_mutex);
1869
Daniel Vetter0d38f002012-04-21 22:49:10 +02001870out:
Chris Wilson44834a62010-08-19 16:09:23 +01001871 return 0;
1872}
1873
Jani Nikulaada8f952015-12-15 13:17:12 +02001874static int i915_vbt(struct seq_file *m, void *unused)
1875{
David Weinehall36cdd012016-08-22 13:59:31 +03001876 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
Jani Nikulaada8f952015-12-15 13:17:12 +02001877
1878 if (opregion->vbt)
1879 seq_write(m, opregion->vbt, opregion->vbt_size);
1880
1881 return 0;
1882}
1883
Chris Wilson37811fc2010-08-25 22:45:57 +01001884static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1885{
David Weinehall36cdd012016-08-22 13:59:31 +03001886 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1887 struct drm_device *dev = &dev_priv->drm;
Namrta Salonieb13b8402015-11-27 13:43:11 +05301888 struct intel_framebuffer *fbdev_fb = NULL;
Daniel Vetter3a58ee12015-07-10 19:02:51 +02001889 struct drm_framebuffer *drm_fb;
Chris Wilson188c1ab2016-04-03 14:14:20 +01001890 int ret;
1891
1892 ret = mutex_lock_interruptible(&dev->struct_mutex);
1893 if (ret)
1894 return ret;
Chris Wilson37811fc2010-08-25 22:45:57 +01001895
Daniel Vetter06957262015-08-10 13:34:08 +02001896#ifdef CONFIG_DRM_FBDEV_EMULATION
Daniel Vetter346fb4e2017-07-06 15:00:20 +02001897 if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
David Weinehall36cdd012016-08-22 13:59:31 +03001898 fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
Chris Wilson37811fc2010-08-25 22:45:57 +01001899
Chris Wilson25bcce92016-07-02 15:36:00 +01001900 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1901 fbdev_fb->base.width,
1902 fbdev_fb->base.height,
Ville Syrjäläb00c6002016-12-14 23:31:35 +02001903 fbdev_fb->base.format->depth,
Ville Syrjälä272725c2016-12-14 23:32:20 +02001904 fbdev_fb->base.format->cpp[0] * 8,
Ville Syrjäläbae781b2016-11-16 13:33:16 +02001905 fbdev_fb->base.modifier,
Chris Wilson25bcce92016-07-02 15:36:00 +01001906 drm_framebuffer_read_refcount(&fbdev_fb->base));
Daniel Stonea5ff7a42018-05-18 15:30:07 +01001907 describe_obj(m, intel_fb_obj(&fbdev_fb->base));
Chris Wilson25bcce92016-07-02 15:36:00 +01001908 seq_putc(m, '\n');
1909 }
Daniel Vetter4520f532013-10-09 09:18:51 +02001910#endif
Chris Wilson37811fc2010-08-25 22:45:57 +01001911
Daniel Vetter4b096ac2012-12-10 21:19:18 +01001912 mutex_lock(&dev->mode_config.fb_lock);
Daniel Vetter3a58ee12015-07-10 19:02:51 +02001913 drm_for_each_fb(drm_fb, dev) {
Namrta Salonieb13b8402015-11-27 13:43:11 +05301914 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1915 if (fb == fbdev_fb)
Chris Wilson37811fc2010-08-25 22:45:57 +01001916 continue;
1917
Tvrtko Ursulinc1ca506d2015-02-10 17:16:07 +00001918 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
Chris Wilson37811fc2010-08-25 22:45:57 +01001919 fb->base.width,
1920 fb->base.height,
Ville Syrjäläb00c6002016-12-14 23:31:35 +02001921 fb->base.format->depth,
Ville Syrjälä272725c2016-12-14 23:32:20 +02001922 fb->base.format->cpp[0] * 8,
Ville Syrjäläbae781b2016-11-16 13:33:16 +02001923 fb->base.modifier,
Dave Airlie747a5982016-04-15 15:10:35 +10001924 drm_framebuffer_read_refcount(&fb->base));
Daniel Stonea5ff7a42018-05-18 15:30:07 +01001925 describe_obj(m, intel_fb_obj(&fb->base));
Damien Lespiau267f0c92013-06-24 22:59:48 +01001926 seq_putc(m, '\n');
Chris Wilson37811fc2010-08-25 22:45:57 +01001927 }
Daniel Vetter4b096ac2012-12-10 21:19:18 +01001928 mutex_unlock(&dev->mode_config.fb_lock);
Chris Wilson188c1ab2016-04-03 14:14:20 +01001929 mutex_unlock(&dev->struct_mutex);
Chris Wilson37811fc2010-08-25 22:45:57 +01001930
1931 return 0;
1932}
1933
Chris Wilson7e37f882016-08-02 22:50:21 +01001934static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001935{
Chris Wilsonef5032a2018-03-07 13:42:24 +00001936 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1937 ring->space, ring->head, ring->tail, ring->emit);
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001938}
1939
/*
 * debugfs: dump every GEM context on dev_priv->contexts.list together
 * with its owner and per-engine state.  Holds struct_mutex for the
 * whole walk; may return -EINTR if the lock wait is interrupted.
 */
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		seq_printf(m, "HW context %u ", ctx->hw_id);
		if (ctx->pid) {
			struct task_struct *task;

			/* Takes a task ref; dropped below after printing. */
			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			/* file_priv is an ERR_PTR once the owner closed its fd. */
			seq_puts(m, "(deleted) ");
		} else {
			/* No pid and a valid/NULL file_priv: kernel-owned context. */
			seq_puts(m, "(kernel) ");
		}

		/* 'R' if L3 remapping is pending for this context, else 'r'. */
		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		/* Per-engine: backing state object and ringbuffer, if allocated. */
		for_each_engine(engine, dev_priv, id) {
			struct intel_context *ce =
				to_intel_context(ctx, engine);

			seq_printf(m, "%s: ", engine->name);
			if (ce->state)
				describe_obj(m, ce->state->obj);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1992
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001993static const char *swizzle_string(unsigned swizzle)
1994{
Damien Lespiauaee56cf2013-06-24 22:59:49 +01001995 switch (swizzle) {
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001996 case I915_BIT_6_SWIZZLE_NONE:
1997 return "none";
1998 case I915_BIT_6_SWIZZLE_9:
1999 return "bit9";
2000 case I915_BIT_6_SWIZZLE_9_10:
2001 return "bit9/bit10";
2002 case I915_BIT_6_SWIZZLE_9_11:
2003 return "bit9/bit11";
2004 case I915_BIT_6_SWIZZLE_9_10_11:
2005 return "bit9/bit10/bit11";
2006 case I915_BIT_6_SWIZZLE_9_17:
2007 return "bit9/bit17";
2008 case I915_BIT_6_SWIZZLE_9_10_17:
2009 return "bit9/bit10/bit17";
2010 case I915_BIT_6_SWIZZLE_UNKNOWN:
Masanari Iida8a168ca2012-12-29 02:00:09 +09002011 return "unknown";
Daniel Vetterea16a3c2011-12-14 13:57:16 +01002012 }
2013
2014 return "bug";
2015}
2016
/*
 * debugfs: report the detected bit-6 swizzle modes and dump the raw
 * registers they were derived from (generation dependent).  A runtime-PM
 * wakeref is held across the MMIO reads.
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
		/* Gen3/4: DRAM controller and row/bank config registers. */
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		/* Gen6+: per-channel DIMM config plus tiling/arbiter control. */
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		/* The arbiter mode register moved on Gen8. */
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2063
Ben Widawsky1c60fef2013-12-06 14:11:30 -08002064static int per_file_ctx(int id, void *ptr, void *data)
2065{
Chris Wilsone2efd132016-05-24 14:53:34 +01002066 struct i915_gem_context *ctx = ptr;
Ben Widawsky1c60fef2013-12-06 14:11:30 -08002067 struct seq_file *m = data;
Daniel Vetterae6c4802014-08-06 15:04:53 +02002068 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2069
2070 if (!ppgtt) {
2071 seq_printf(m, " no ppgtt for context %d\n",
2072 ctx->user_handle);
2073 return 0;
2074 }
Ben Widawsky1c60fef2013-12-06 14:11:30 -08002075
Oscar Mateof83d6512014-05-22 14:13:38 +01002076 if (i915_gem_context_is_default(ctx))
2077 seq_puts(m, " default context:\n");
2078 else
Oscar Mateo821d66d2014-07-03 16:28:00 +01002079 seq_printf(m, " context %d:\n", ctx->user_handle);
Ben Widawsky1c60fef2013-12-06 14:11:30 -08002080 ppgtt->debug_dump(ppgtt, m);
2081
2082 return 0;
2083}
2084
/*
 * Gen8+: dump the four page-directory-pointer (PDP) registers of each
 * engine.  Each 64-bit PDP is assembled from its UDW/LDW register pair.
 * Bails out early if there is no aliasing PPGTT to report against.
 */
static void gen8_ppgtt_info(struct seq_file *m,
			    struct drm_i915_private *dev_priv)
{
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i;

	if (!ppgtt)
		return;

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s\n", engine->name);
		for (i = 0; i < 4; i++) {
			/* High dword first, then shift and OR in the low dword. */
			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
			pdp <<= 32;
			pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
		}
	}
}
2106
/*
 * Gen6/7: dump per-engine page-directory registers, the aliasing PPGTT
 * (if present) and the global ECOCHK setting.  GFX_MODE is global on
 * Gen6 but per-ring on Gen7, hence the two differently-placed reads.
 */
static void gen6_ppgtt_info(struct seq_file *m,
			    struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (IS_GEN6(dev_priv))
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s\n", engine->name);
		if (IS_GEN7(dev_priv))
			seq_printf(m, "GFX_MODE: 0x%08x\n",
				   I915_READ(RING_MODE_GEN7(engine)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);

		ppgtt->debug_dump(ppgtt, m);
	}

	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}
2139
/*
 * debugfs: dump PPGTT state, global (gen-specific) then per open file.
 *
 * Lock order is filelist_mutex -> struct_mutex; the goto labels unwind
 * in reverse acquisition order.  Returns -EINTR if the struct_mutex
 * wait is interrupted, -ESRCH if an owning task has already exited.
 */
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_file *file;
	int ret;

	mutex_lock(&dev->filelist_mutex);
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out_unlock;

	intel_runtime_pm_get(dev_priv);

	if (INTEL_GEN(dev_priv) >= 8)
		gen8_ppgtt_info(m, dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_ppgtt_info(m, dev_priv);

	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		task = get_pid_task(file->pid, PIDTYPE_PID);
		if (!task) {
			ret = -ESRCH;
			goto out_rpm;
		}
		seq_printf(m, "\nproc: %s\n", task->comm);
		put_task_struct(task);
		/* The seq_file is smuggled through the idr cookie argument. */
		idr_for_each(&file_priv->context_idr, per_file_ctx,
			     (void *)(unsigned long)m);
	}

out_rpm:
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
out_unlock:
	mutex_unlock(&dev->filelist_mutex);
	return ret;
}
2181
Chris Wilsonf5a4c672015-04-27 13:41:23 +01002182static int count_irq_waiters(struct drm_i915_private *i915)
2183{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002184 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05302185 enum intel_engine_id id;
Chris Wilsonf5a4c672015-04-27 13:41:23 +01002186 int count = 0;
Chris Wilsonf5a4c672015-04-27 13:41:23 +01002187
Akash Goel3b3f1652016-10-13 22:44:48 +05302188 for_each_engine(engine, i915, id)
Chris Wilson688e6c72016-07-01 17:23:15 +01002189 count += intel_engine_has_waiter(engine);
Chris Wilsonf5a4c672015-04-27 13:41:23 +01002190
2191 return count;
2192}
2193
Chris Wilson7466c292016-08-15 09:49:33 +01002194static const char *rps_power_to_str(unsigned int power)
2195{
2196 static const char * const strings[] = {
2197 [LOW_POWER] = "low power",
2198 [BETWEEN] = "mixed",
2199 [HIGH_POWER] = "high power",
2200 };
2201
2202 if (power >= ARRAY_SIZE(strings) || !strings[power])
2203 return "unknown";
2204
2205 return strings[power];
2206}
2207
/*
 * debugfs: dump RPS (render P-state) software state: frequency limits,
 * per-client boost counts, and -- when the GPU is busy on Gen6+ -- the
 * hardware's up/down autotuning counters.
 */
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	struct drm_file *file;

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s [%d requests]\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Frequency requested %d\n",
		   intel_gpu_freq(dev_priv, rps->cur_freq));
	seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, rps->min_freq),
		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq));
	seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, rps->idle_freq),
		   intel_gpu_freq(dev_priv, rps->efficient_freq),
		   intel_gpu_freq(dev_priv, rps->boost_freq));

	/* Per-client boost counts; filelist_mutex guards the file list. */
	mutex_lock(&dev->filelist_mutex);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		/* pid_task() takes no reference, so stay inside RCU. */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s [%d]: %d boosts\n",
			   task ? task->comm : "<unknown>",
			   task ? task->pid : -1,
			   atomic_read(&file_priv->rps_client.boosts));
		rcu_read_unlock();
	}
	seq_printf(m, "Kernel (anonymous) boosts: %d\n",
		   atomic_read(&rps->boosts));
	mutex_unlock(&dev->filelist_mutex);

	if (INTEL_GEN(dev_priv) >= 6 &&
	    rps->enabled &&
	    dev_priv->gt.active_requests) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		/* Raw MMIO reads bracketed by an explicit forcewake grab. */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power));
		/* Guard both counters to avoid a divide-by-zero. */
		seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->up_threshold);
		seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
2277
Ben Widawsky63573eb2013-07-04 11:02:07 -07002278static int i915_llc(struct seq_file *m, void *data)
2279{
David Weinehall36cdd012016-08-22 13:59:31 +03002280 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Mika Kuoppala3accaf72016-04-13 17:26:43 +03002281 const bool edram = INTEL_GEN(dev_priv) > 8;
Ben Widawsky63573eb2013-07-04 11:02:07 -07002282
David Weinehall36cdd012016-08-22 13:59:31 +03002283 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
Mika Kuoppala3accaf72016-04-13 17:26:43 +03002284 seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2285 intel_uncore_edram_size(dev_priv)/1024/1024);
Ben Widawsky63573eb2013-07-04 11:02:07 -07002286
2287 return 0;
2288}
2289
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002290static int i915_huc_load_status_info(struct seq_file *m, void *data)
2291{
2292 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Michal Wajdeczko56ffc742017-10-17 09:44:49 +00002293 struct drm_printer p;
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002294
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002295 if (!HAS_HUC(dev_priv))
2296 return -ENODEV;
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002297
Michal Wajdeczko56ffc742017-10-17 09:44:49 +00002298 p = drm_seq_file_printer(m);
2299 intel_uc_fw_dump(&dev_priv->huc.fw, &p);
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002300
sagar.a.kamble@intel.com3582ad12017-02-03 13:58:33 +05302301 intel_runtime_pm_get(dev_priv);
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002302 seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
sagar.a.kamble@intel.com3582ad12017-02-03 13:58:33 +05302303 intel_runtime_pm_put(dev_priv);
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002304
2305 return 0;
2306}
2307
/*
 * debugfs: dump GuC firmware state, decode the GUC_STATUS register
 * fields, and print the 16 soft-scratch registers.  Returns -ENODEV
 * on hardware without a GuC.
 */
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_printer p;
	u32 tmp, i;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->guc.fw, &p);

	/* Wakeref held across all the register reads below. */
	intel_runtime_pm_get(dev_priv);

	tmp = I915_READ(GUC_STATUS);

	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
	seq_printf(m, "\tBootrom status = 0x%x\n",
		   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
	seq_printf(m, "\tuKernel status = 0x%x\n",
		   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
	seq_printf(m, "\tMIA Core status = 0x%x\n",
		   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
	seq_puts(m, "\nScratch registers:\n");
	for (i = 0; i < 16; i++)
		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2339
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01002340static const char *
2341stringify_guc_log_type(enum guc_log_buffer_type type)
2342{
2343 switch (type) {
2344 case GUC_ISR_LOG_BUFFER:
2345 return "ISR";
2346 case GUC_DPC_LOG_BUFFER:
2347 return "DPC";
2348 case GUC_CRASH_DUMP_LOG_BUFFER:
2349 return "CRASH";
2350 default:
2351 MISSING_CASE(type);
2352 }
2353
2354 return "";
2355}
2356
/*
 * Print GuC log-relay statistics: the relay full count plus per-buffer
 * flush/overflow counters.  Prints a one-line notice and returns early
 * when the relay is not enabled.
 */
static void i915_guc_log_info(struct seq_file *m,
			      struct drm_i915_private *dev_priv)
{
	struct intel_guc_log *log = &dev_priv->guc.log;
	enum guc_log_buffer_type type;

	if (!intel_guc_log_relay_enabled(log)) {
		seq_puts(m, "GuC log relay disabled\n");
		return;
	}

	seq_puts(m, "GuC logging stats:\n");

	seq_printf(m, "\tRelay full count: %u\n",
		   log->relay.full_count);

	/* One stats line per log buffer type (ISR, DPC, CRASH). */
	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
		seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
			   stringify_guc_log_type(type),
			   log->stats[type].flush,
			   log->stats[type].sampled_overflow);
	}
}
2380
Dave Gordon8b417c22015-08-12 15:43:44 +01002381static void i915_guc_client_info(struct seq_file *m,
2382 struct drm_i915_private *dev_priv,
Sagar Arun Kamble5afc8b42017-11-16 19:02:40 +05302383 struct intel_guc_client *client)
Dave Gordon8b417c22015-08-12 15:43:44 +01002384{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002385 struct intel_engine_cs *engine;
Dave Gordonc18468c2016-08-09 15:19:22 +01002386 enum intel_engine_id id;
Dave Gordon8b417c22015-08-12 15:43:44 +01002387 uint64_t tot = 0;
Dave Gordon8b417c22015-08-12 15:43:44 +01002388
Oscar Mateob09935a2017-03-22 10:39:53 -07002389 seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2390 client->priority, client->stage_id, client->proc_desc_offset);
Michał Winiarski59db36c2017-09-14 12:51:23 +02002391 seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2392 client->doorbell_id, client->doorbell_offset);
Dave Gordon8b417c22015-08-12 15:43:44 +01002393
Akash Goel3b3f1652016-10-13 22:44:48 +05302394 for_each_engine(engine, dev_priv, id) {
Dave Gordonc18468c2016-08-09 15:19:22 +01002395 u64 submissions = client->submissions[id];
2396 tot += submissions;
Dave Gordon8b417c22015-08-12 15:43:44 +01002397 seq_printf(m, "\tSubmissions: %llu %s\n",
Dave Gordonc18468c2016-08-09 15:19:22 +01002398 submissions, engine->name);
Dave Gordon8b417c22015-08-12 15:43:44 +01002399 }
2400 seq_printf(m, "\tTotal: %llu\n", tot);
2401}
2402
/*
 * debugfs: top-level GuC state dump.  Always prints logging stats; the
 * doorbell map and per-client info are only meaningful (and only
 * printed) when GuC submission is in use.  -ENODEV when GuC is unused.
 */
static int i915_guc_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;

	if (!USES_GUC(dev_priv))
		return -ENODEV;

	i915_guc_log_info(m, dev_priv);

	if (!USES_GUC_SUBMISSION(dev_priv))
		return 0;

	/* With submission enabled, the execbuf client must exist. */
	GEM_BUG_ON(!guc->execbuf_client);

	seq_printf(m, "\nDoorbell map:\n");
	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
	seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);

	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
	i915_guc_client_info(m, dev_priv, guc->execbuf_client);
	/* The preempt client is optional. */
	if (guc->preempt_client) {
		seq_printf(m, "\nGuC preempt client @ %p:\n",
			   guc->preempt_client);
		i915_guc_client_info(m, dev_priv, guc->preempt_client);
	}

	/* Add more as required ... */

	return 0;
}
2434
/*
 * debugfs: walk the GuC stage-descriptor pool and print every active
 * descriptor, including its per-engine execlist context entries for
 * the engines used by the execbuf client.  -ENODEV without GuC
 * submission.
 */
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;
	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
	struct intel_guc_client *client = guc->execbuf_client;
	unsigned int tmp;
	int index;

	if (!USES_GUC_SUBMISSION(dev_priv))
		return -ENODEV;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		/* Skip slots without the ACTIVE attribute bit. */
		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		/* Per-engine logical ring context entries for this stage. */
		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
				&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}
2488
/*
 * debugfs: hexdump a GuC log buffer, four 32-bit words per line.  The
 * info_ent->data flag selects the load-error log instead of the normal
 * runtime log.  -ENODEV without GuC hardware; silently empty when the
 * selected log object does not exist.
 */
static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	bool dump_load_err = !!node->info_ent->data;
	struct drm_i915_gem_object *obj = NULL;
	u32 *log;
	int i = 0;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	if (dump_load_err)
		obj = dev_priv->guc.load_err_log;
	else if (dev_priv->guc.log.vma)
		obj = dev_priv->guc.log.vma->obj;

	if (!obj)
		return 0;

	/* Map the whole object write-combined for CPU reads. */
	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(log)) {
		DRM_DEBUG("Failed to pin object\n");
		seq_puts(m, "(log data unaccessible)\n");
		return PTR_ERR(log);
	}

	/* Four u32 words per output line. */
	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			   *(log + i), *(log + i + 1),
			   *(log + i + 2), *(log + i + 3));

	seq_putc(m, '\n');

	i915_gem_object_unpin_map(obj);

	return 0;
}
2527
Michał Winiarski4977a282018-03-19 10:53:40 +01002528static int i915_guc_log_level_get(void *data, u64 *val)
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302529{
Chris Wilsonbcc36d82017-04-07 20:42:20 +01002530 struct drm_i915_private *dev_priv = data;
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302531
Michał Winiarski86aa8242018-03-08 16:46:53 +01002532 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002533 return -ENODEV;
2534
Piotr Piórkowski50935ac2018-06-04 16:19:41 +02002535 *val = intel_guc_log_get_level(&dev_priv->guc.log);
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302536
2537 return 0;
2538}
2539
Michał Winiarski4977a282018-03-19 10:53:40 +01002540static int i915_guc_log_level_set(void *data, u64 val)
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302541{
Chris Wilsonbcc36d82017-04-07 20:42:20 +01002542 struct drm_i915_private *dev_priv = data;
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302543
Michał Winiarski86aa8242018-03-08 16:46:53 +01002544 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002545 return -ENODEV;
2546
Piotr Piórkowski50935ac2018-06-04 16:19:41 +02002547 return intel_guc_log_set_level(&dev_priv->guc.log, val);
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302548}
2549
/* i915_guc_log_level: read/write the GuC log verbosity as a decimal u64. */
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
			i915_guc_log_level_get, i915_guc_log_level_set,
			"%lld\n");
2553
Michał Winiarski4977a282018-03-19 10:53:40 +01002554static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2555{
2556 struct drm_i915_private *dev_priv = inode->i_private;
2557
2558 if (!USES_GUC(dev_priv))
2559 return -ENODEV;
2560
2561 file->private_data = &dev_priv->guc.log;
2562
2563 return intel_guc_log_relay_open(&dev_priv->guc.log);
2564}
2565
2566static ssize_t
2567i915_guc_log_relay_write(struct file *filp,
2568 const char __user *ubuf,
2569 size_t cnt,
2570 loff_t *ppos)
2571{
2572 struct intel_guc_log *log = filp->private_data;
2573
2574 intel_guc_log_relay_flush(log);
2575
2576 return cnt;
2577}
2578
2579static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2580{
2581 struct drm_i915_private *dev_priv = inode->i_private;
2582
2583 intel_guc_log_relay_close(&dev_priv->guc.log);
2584
2585 return 0;
2586}
2587
/*
 * File operations for the GuC log relay debugfs entry: open starts
 * relay logging, any write forces a flush, release tears it down.
 */
static const struct file_operations i915_guc_log_relay_fops = {
	.owner = THIS_MODULE,
	.open = i915_guc_log_relay_open,
	.write = i915_guc_log_relay_write,
	.release = i915_guc_log_relay_release,
};
2594
Vathsala Nagaraju00b06292018-06-27 13:38:30 +05302595static void
2596psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
Chris Wilsonb86bef202017-01-16 13:06:21 +00002597{
Vathsala Nagaraju00b06292018-06-27 13:38:30 +05302598 u32 val, psr_status;
Chris Wilsonb86bef202017-01-16 13:06:21 +00002599
Vathsala Nagaraju00b06292018-06-27 13:38:30 +05302600 if (dev_priv->psr.psr2_enabled) {
2601 static const char * const live_status[] = {
2602 "IDLE",
2603 "CAPTURE",
2604 "CAPTURE_FS",
2605 "SLEEP",
2606 "BUFON_FW",
2607 "ML_UP",
2608 "SU_STANDBY",
2609 "FAST_SLEEP",
2610 "DEEP_SLEEP",
2611 "BUF_ON",
2612 "TG_ON"
2613 };
2614 psr_status = I915_READ(EDP_PSR2_STATUS);
2615 val = (psr_status & EDP_PSR2_STATUS_STATE_MASK) >>
2616 EDP_PSR2_STATUS_STATE_SHIFT;
2617 if (val < ARRAY_SIZE(live_status)) {
2618 seq_printf(m, "Source PSR status: 0x%x [%s]\n",
2619 psr_status, live_status[val]);
2620 return;
2621 }
2622 } else {
2623 static const char * const live_status[] = {
2624 "IDLE",
2625 "SRDONACK",
2626 "SRDENT",
2627 "BUFOFF",
2628 "BUFON",
2629 "AUXACK",
2630 "SRDOFFACK",
2631 "SRDENT_ON",
2632 };
2633 psr_status = I915_READ(EDP_PSR_STATUS);
2634 val = (psr_status & EDP_PSR_STATUS_STATE_MASK) >>
2635 EDP_PSR_STATUS_STATE_SHIFT;
2636 if (val < ARRAY_SIZE(live_status)) {
2637 seq_printf(m, "Source PSR status: 0x%x [%s]\n",
2638 psr_status, live_status[val]);
2639 return;
2640 }
2641 }
Chris Wilsonb86bef202017-01-16 13:06:21 +00002642
Vathsala Nagaraju00b06292018-06-27 13:38:30 +05302643 seq_printf(m, "Source PSR status: 0x%x [%s]\n", psr_status, "unknown");
Chris Wilsonb86bef202017-01-16 13:06:21 +00002644}
2645
José Roberto de Souzad0bc8622018-04-25 14:23:33 -07002646static const char *psr_sink_status(u8 val)
2647{
2648 static const char * const sink_status[] = {
2649 "inactive",
2650 "transition to active, capture and display",
2651 "active, display from RFB",
2652 "active, capture and display on sink device timings",
2653 "transition to inactive, capture and display, timing re-sync",
2654 "reserved",
2655 "reserved",
2656 "sink internal error"
2657 };
2658
2659 val &= DP_PSR_SINK_STATE_MASK;
2660 if (val < ARRAY_SIZE(sink_status))
2661 return sink_status[val];
2662
2663 return "unknown";
2664}
2665
/*
 * debugfs show routine: report eDP PSR (Panel Self Refresh) state on
 * both the source (GPU registers) and the sink (DPCD over AUX).
 * Returns -ENODEV when the platform has no PSR support.
 */
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 psrperf = 0;
	bool enabled = false;
	bool sink_support;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	/* Without sink support nothing else below is meaningful. */
	sink_support = dev_priv->psr.sink_support;
	seq_printf(m, "Sink_Support: %s\n", yesno(sink_support));
	if (!sink_support)
		return 0;

	/* Keep the device awake while reading PSR registers. */
	intel_runtime_pm_get(dev_priv);

	/* psr.lock protects the software PSR state read below. */
	mutex_lock(&dev_priv->psr.lock);
	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
		   dev_priv->psr.busy_frontbuffer_bits);

	/* PSR1 and PSR2 have separate enable bits in separate registers. */
	if (dev_priv->psr.psr2_enabled)
		enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
	else
		enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;

	seq_printf(m, "Main link in standby mode: %s\n",
		   yesno(dev_priv->psr.link_standby));

	seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));

	/*
	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
			EDP_PSR_PERF_CNT_MASK;

		seq_printf(m, "Performance_Counter: %u\n", psrperf);
	}

	psr_source_status(dev_priv, m);

	/* If PSR is active, also query the sink's view over the AUX channel. */
	if (dev_priv->psr.enabled) {
		struct drm_dp_aux *aux = &dev_priv->psr.enabled->aux;
		u8 val;

		if (drm_dp_dpcd_readb(aux, DP_PSR_STATUS, &val) == 1)
			seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val,
				   psr_sink_status(val));
	}
	mutex_unlock(&dev_priv->psr.lock);

	/* Entry/exit timestamps are only tracked in PSR debug mode. */
	if (READ_ONCE(dev_priv->psr.debug)) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   dev_priv->psr.last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n",
			   dev_priv->psr.last_exit);
	}

	intel_runtime_pm_put(dev_priv);
	return 0;
}
2730
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002731static int
2732i915_edp_psr_debug_set(void *data, u64 val)
2733{
2734 struct drm_i915_private *dev_priv = data;
2735
2736 if (!CAN_PSR(dev_priv))
2737 return -ENODEV;
2738
2739 DRM_DEBUG_KMS("PSR debug %s\n", enableddisabled(val));
2740
2741 intel_runtime_pm_get(dev_priv);
2742 intel_psr_irq_control(dev_priv, !!val);
2743 intel_runtime_pm_put(dev_priv);
2744
2745 return 0;
2746}
2747
2748static int
2749i915_edp_psr_debug_get(void *data, u64 *val)
2750{
2751 struct drm_i915_private *dev_priv = data;
2752
2753 if (!CAN_PSR(dev_priv))
2754 return -ENODEV;
2755
2756 *val = READ_ONCE(dev_priv->psr.debug);
2757 return 0;
2758}
2759
/* i915_edp_psr_debug: toggle PSR debug interrupt handling from userspace. */
DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");
2763
/*
 * debugfs show routine: find the first active eDP connector and print
 * the 6-byte CRC the sink computed over the frame it received.
 * Returns -ENODEV if no active eDP connector was found.
 *
 * Locking: uses a drm_modeset_acquire_ctx so that contended locks
 * report -EDEADLK, in which case we back off and retry the connector.
 * All locks taken inside the loop are tracked by the ctx and released
 * in one go at "out:".
 */
static int i915_sink_crc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp = NULL;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	u8 crc[6];

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	drm_connector_list_iter_begin(dev, &conn_iter);

	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_crtc *crtc;
		struct drm_connector_state *state;
		struct intel_crtc_state *crtc_state;

		/* Sink CRC is only supported on eDP here. */
		if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

retry:
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
		if (ret)
			goto err;

		state = connector->base.state;
		if (!state->best_encoder)
			continue;

		crtc = state->crtc;
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret)
			goto err;

		crtc_state = to_intel_crtc_state(crtc->state);
		if (!crtc_state->base.active)
			continue;

		/*
		 * We need to wait for all crtc updates to complete, to make
		 * sure any pending modesets and plane updates are completed.
		 */
		if (crtc_state->base.commit) {
			ret = wait_for_completion_interruptible(&crtc_state->base.commit->hw_done);

			if (ret)
				goto err;
		}

		intel_dp = enc_to_intel_dp(state->best_encoder);

		/* Ask the sink (via DPCD) for the CRC of the received frame. */
		ret = intel_dp_sink_crc(intel_dp, crtc_state, crc);
		if (ret)
			goto err;

		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
			   crc[0], crc[1], crc[2],
			   crc[3], crc[4], crc[5]);
		goto out;

err:
		/* -EDEADLK means lock contention: drop everything and retry. */
		if (ret == -EDEADLK) {
			ret = drm_modeset_backoff(&ctx);
			if (!ret)
				goto retry;
		}
		goto out;
	}
	ret = -ENODEV;
out:
	drm_connector_list_iter_end(&conn_iter);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
2843
Jesse Barnesec013e72013-08-20 10:29:23 +01002844static int i915_energy_uJ(struct seq_file *m, void *data)
2845{
David Weinehall36cdd012016-08-22 13:59:31 +03002846 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Gabriel Krisman Bertazid38014e2017-07-26 02:30:16 -03002847 unsigned long long power;
Jesse Barnesec013e72013-08-20 10:29:23 +01002848 u32 units;
2849
David Weinehall36cdd012016-08-22 13:59:31 +03002850 if (INTEL_GEN(dev_priv) < 6)
Jesse Barnesec013e72013-08-20 10:29:23 +01002851 return -ENODEV;
2852
Paulo Zanoni36623ef2014-02-21 13:52:23 -03002853 intel_runtime_pm_get(dev_priv);
2854
Gabriel Krisman Bertazid38014e2017-07-26 02:30:16 -03002855 if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
2856 intel_runtime_pm_put(dev_priv);
2857 return -ENODEV;
2858 }
2859
2860 units = (power & 0x1f00) >> 8;
Jesse Barnesec013e72013-08-20 10:29:23 +01002861 power = I915_READ(MCH_SECP_NRG_STTS);
Gabriel Krisman Bertazid38014e2017-07-26 02:30:16 -03002862 power = (1000000 * power) >> units; /* convert to uJ */
Jesse Barnesec013e72013-08-20 10:29:23 +01002863
Paulo Zanoni36623ef2014-02-21 13:52:23 -03002864 intel_runtime_pm_put(dev_priv);
2865
Gabriel Krisman Bertazid38014e2017-07-26 02:30:16 -03002866 seq_printf(m, "%llu", power);
Paulo Zanoni371db662013-08-19 13:18:10 -03002867
2868 return 0;
2869}
2870
/*
 * debugfs show routine: summarize runtime power management state —
 * GT wakefulness, interrupt state, the PM core's usage count (when
 * CONFIG_PM is built in) and the PCI device power state.
 */
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/* Informational only: the rest of the output is still printed. */
	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "GPU idle: %s (epoch %u)\n",
		   yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	return 0;
}
2895
Imre Deak1da51582013-11-25 17:15:35 +02002896static int i915_power_domain_info(struct seq_file *m, void *unused)
2897{
David Weinehall36cdd012016-08-22 13:59:31 +03002898 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Imre Deak1da51582013-11-25 17:15:35 +02002899 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2900 int i;
2901
2902 mutex_lock(&power_domains->lock);
2903
2904 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2905 for (i = 0; i < power_domains->power_well_count; i++) {
2906 struct i915_power_well *power_well;
2907 enum intel_display_power_domain power_domain;
2908
2909 power_well = &power_domains->power_wells[i];
2910 seq_printf(m, "%-25s %d\n", power_well->name,
2911 power_well->count);
2912
Joonas Lahtinen8385c2e2017-02-08 15:12:10 +02002913 for_each_power_domain(power_domain, power_well->domains)
Imre Deak1da51582013-11-25 17:15:35 +02002914 seq_printf(m, " %-23s %d\n",
Daniel Stone9895ad02015-11-20 15:55:33 +00002915 intel_display_power_domain_str(power_domain),
Imre Deak1da51582013-11-25 17:15:35 +02002916 power_domains->domain_use_count[power_domain]);
Imre Deak1da51582013-11-25 17:15:35 +02002917 }
2918
2919 mutex_unlock(&power_domains->lock);
2920
2921 return 0;
2922}
2923
/*
 * debugfs show routine: report CSR/DMC firmware state — whether the
 * payload loaded, its path and version, DC-state transition counters
 * (only exposed on firmware versions known to provide them) and the
 * CSR program/ssp/htp registers.
 */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv))
		return -ENODEV;

	csr = &dev_priv->csr;

	/* Keep the device awake for the register reads below. */
	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	/* Version and DC counters are meaningless without a payload. */
	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	/* DC-state counters depend on platform and firmware version. */
	if (IS_KABYLAKE(dev_priv) ||
	    (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
	} else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
	}

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2965
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002966static void intel_seq_print_mode(struct seq_file *m, int tabs,
2967 struct drm_display_mode *mode)
2968{
2969 int i;
2970
2971 for (i = 0; i < tabs; i++)
2972 seq_putc(m, '\t');
2973
2974 seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2975 mode->base.id, mode->name,
2976 mode->vrefresh, mode->clock,
2977 mode->hdisplay, mode->hsync_start,
2978 mode->hsync_end, mode->htotal,
2979 mode->vdisplay, mode->vsync_start,
2980 mode->vsync_end, mode->vtotal,
2981 mode->type, mode->flags);
2982}
2983
2984static void intel_encoder_info(struct seq_file *m,
2985 struct intel_crtc *intel_crtc,
2986 struct intel_encoder *intel_encoder)
2987{
David Weinehall36cdd012016-08-22 13:59:31 +03002988 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2989 struct drm_device *dev = &dev_priv->drm;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002990 struct drm_crtc *crtc = &intel_crtc->base;
2991 struct intel_connector *intel_connector;
2992 struct drm_encoder *encoder;
2993
2994 encoder = &intel_encoder->base;
2995 seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
Jani Nikula8e329a032014-06-03 14:56:21 +03002996 encoder->base.id, encoder->name);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002997 for_each_connector_on_encoder(dev, encoder, intel_connector) {
2998 struct drm_connector *connector = &intel_connector->base;
2999 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
3000 connector->base.id,
Jani Nikulac23cc412014-06-03 14:56:17 +03003001 connector->name,
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003002 drm_get_connector_status_name(connector->status));
3003 if (connector->status == connector_status_connected) {
3004 struct drm_display_mode *mode = &crtc->mode;
3005 seq_printf(m, ", mode:\n");
3006 intel_seq_print_mode(m, 2, mode);
3007 } else {
3008 seq_putc(m, '\n');
3009 }
3010 }
3011}
3012
3013static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3014{
David Weinehall36cdd012016-08-22 13:59:31 +03003015 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3016 struct drm_device *dev = &dev_priv->drm;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003017 struct drm_crtc *crtc = &intel_crtc->base;
3018 struct intel_encoder *intel_encoder;
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02003019 struct drm_plane_state *plane_state = crtc->primary->state;
3020 struct drm_framebuffer *fb = plane_state->fb;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003021
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02003022 if (fb)
Matt Roper5aa8a932014-06-16 10:12:55 -07003023 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02003024 fb->base.id, plane_state->src_x >> 16,
3025 plane_state->src_y >> 16, fb->width, fb->height);
Matt Roper5aa8a932014-06-16 10:12:55 -07003026 else
3027 seq_puts(m, "\tprimary plane disabled\n");
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003028 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
3029 intel_encoder_info(m, intel_crtc, intel_encoder);
3030}
3031
/* Print a panel's fixed (native) mode, indented two tab stops. */
static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
{
	struct drm_display_mode *mode = panel->fixed_mode;

	seq_printf(m, "\tfixed mode:\n");
	intel_seq_print_mode(m, 2, mode);
}
3039
/*
 * Print DisplayPort-specific connector details: DPCD revision, audio
 * capability, panel info for eDP, and a dump of any DP branch-device
 * (downstream port) information.
 */
static void intel_dp_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
	/* Only eDP connectors have an associated fixed panel. */
	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_info(m, &intel_connector->panel);

	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
				&intel_dp->aux);
}
3054
/*
 * Print DP-MST-specific connector details: audio capability is looked
 * up per MST port via the topology manager rather than from the
 * intel_dp state.
 */
static void intel_dp_mst_info(struct seq_file *m,
			      struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp_mst_encoder *intel_mst =
		enc_to_mst(&intel_encoder->base);
	struct intel_digital_port *intel_dig_port = intel_mst->primary;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
						   intel_connector->port);

	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
}
3068
/* Print HDMI-specific connector details (currently just audio support). */
static void intel_hdmi_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);

	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
}
3077
/* For LVDS the only detail worth printing is the panel's fixed mode. */
static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
3083
/*
 * Print one connector: identity and status, display-info details when
 * connected, output-type-specific details (DP/MST, LVDS, HDMI) when an
 * encoder is attached, and finally the connector's probed mode list.
 */
static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));
	if (connector->status == connector_status_connected) {
		seq_printf(m, "\tname: %s\n", connector->display_info.name);
		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
			   connector->display_info.width_mm,
			   connector->display_info.height_mm);
		seq_printf(m, "\tsubpixel order: %s\n",
			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
		seq_printf(m, "\tCEA rev: %d\n",
			   connector->display_info.cea_rev);
	}

	/* Without an encoder there are no output-specific details. */
	if (!intel_encoder)
		return;

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		/* MST connectors need the topology-aware variant. */
		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
			intel_dp_mst_info(m, intel_connector);
		else
			intel_dp_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
		    intel_encoder->type == INTEL_OUTPUT_DDI)
			intel_hdmi_info(m, intel_connector);
		break;
	default:
		break;
	}

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}
3133
/* Map a DRM plane type to the short tag used in the plane listing. */
static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}
3151
/*
 * Format a plane rotation bitmask as a human-readable string.
 * NOTE: returns a pointer to a static buffer, so this is not
 * reentrant — fine for single-threaded debugfs output.
 */
static const char *plane_rotation(unsigned int rotation)
{
	static char buf[48];
	/*
	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused
	 */
	snprintf(buf, sizeof(buf),
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
		 rotation);

	return buf;
}
3171
/*
 * Print every plane on a CRTC: type, position/size on the CRTC, the
 * source rectangle (16.16 fixed point, printed as integer.fraction),
 * pixel format and rotation.
 */
static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_plane *intel_plane;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane_state *state;
		struct drm_plane *plane = &intel_plane->base;
		struct drm_format_name_buf format_name;

		if (!plane->state) {
			seq_puts(m, "plane->state is NULL!\n");
			continue;
		}

		state = plane->state;

		/* A plane without a framebuffer has no format to report. */
		if (state->fb) {
			drm_get_format_name(state->fb->format->format,
					    &format_name);
		} else {
			sprintf(format_name.str, "N/A");
		}

		/* src_* fields are 16.16 fixed point; the low 16 bits are
		 * scaled by 15625 >> 10 to render the fractional part. */
		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
			   plane->base.id,
			   plane_type(intel_plane->base.type),
			   state->crtc_x, state->crtc_y,
			   state->crtc_w, state->crtc_h,
			   (state->src_x >> 16),
			   ((state->src_x & 0xffff) * 15625) >> 10,
			   (state->src_y >> 16),
			   ((state->src_y & 0xffff) * 15625) >> 10,
			   (state->src_w >> 16),
			   ((state->src_w & 0xffff) * 15625) >> 10,
			   (state->src_h >> 16),
			   ((state->src_h & 0xffff) * 15625) >> 10,
			   format_name.str,
			   plane_rotation(state->rotation));
	}
}
3214
/*
 * Print the pipe scaler state for a CRTC: number of scalers, which are
 * claimed by users, and each scaler's use/mode.
 */
static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct intel_crtc_state *pipe_config;
	int num_scalers = intel_crtc->num_scalers;
	int i;

	pipe_config = to_intel_crtc_state(intel_crtc->base.state);

	/* Not all platforms have a scaler */
	if (num_scalers) {
		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
			   num_scalers,
			   pipe_config->scaler_state.scaler_users,
			   pipe_config->scaler_state.scaler_id);

		for (i = 0; i < num_scalers; i++) {
			struct intel_scaler *sc =
				&pipe_config->scaler_state.scalers[i];

			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
				   i, yesno(sc->in_use), sc->mode);
		}
		seq_puts(m, "\n");
	} else {
		seq_puts(m, "\tNo scalers available on this platform\n");
	}
}
3242
/*
 * debugfs: dump the display state — one section per CRTC (with cursor,
 * scaler and plane details for active pipes) followed by a section
 * listing every connector.
 */
static int i915_display_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct intel_crtc *crtc;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;

        /* Keep the device awake so register-backed state is valid while dumping. */
        intel_runtime_pm_get(dev_priv);
        seq_printf(m, "CRTC info\n");
        seq_printf(m, "---------\n");
        for_each_intel_crtc(dev, crtc) {
                struct intel_crtc_state *pipe_config;

                /* Per-CRTC lock is enough; we only read this CRTC's state. */
                drm_modeset_lock(&crtc->base.mutex, NULL);
                pipe_config = to_intel_crtc_state(crtc->base.state);

                seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
                           crtc->base.base.id, pipe_name(crtc->pipe),
                           yesno(pipe_config->base.active),
                           pipe_config->pipe_src_w, pipe_config->pipe_src_h,
                           yesno(pipe_config->dither), pipe_config->pipe_bpp);

                if (pipe_config->base.active) {
                        struct intel_plane *cursor =
                                to_intel_plane(crtc->base.cursor);

                        intel_crtc_info(m, crtc);

                        seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
                                   yesno(cursor->base.state->visible),
                                   cursor->base.state->crtc_x,
                                   cursor->base.state->crtc_y,
                                   cursor->base.state->crtc_w,
                                   cursor->base.state->crtc_h,
                                   cursor->cursor.base);
                        intel_scaler_info(m, crtc);
                        intel_plane_info(m, crtc);
                }

                seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
                           yesno(!crtc->cpu_fifo_underrun_disabled),
                           yesno(!crtc->pch_fifo_underrun_disabled));
                drm_modeset_unlock(&crtc->base.mutex);
        }

        seq_printf(m, "\n");
        seq_printf(m, "Connector info\n");
        seq_printf(m, "--------------\n");
        /* The connector list walk is done under mode_config.mutex. */
        mutex_lock(&dev->mode_config.mutex);
        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter)
                intel_connector_info(m, connector);
        drm_connector_list_iter_end(&conn_iter);
        mutex_unlock(&dev->mode_config.mutex);

        intel_runtime_pm_put(dev_priv);

        return 0;
}
3303
Chris Wilson1b365952016-10-04 21:11:31 +01003304static int i915_engine_info(struct seq_file *m, void *unused)
3305{
3306 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3307 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05303308 enum intel_engine_id id;
Chris Wilsonf636edb2017-10-09 12:02:57 +01003309 struct drm_printer p;
Chris Wilson1b365952016-10-04 21:11:31 +01003310
Chris Wilson9c870d02016-10-24 13:42:15 +01003311 intel_runtime_pm_get(dev_priv);
3312
Chris Wilson6f561032018-01-24 11:36:07 +00003313 seq_printf(m, "GT awake? %s (epoch %u)\n",
3314 yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
Chris Wilsonf73b5672017-03-02 15:03:56 +00003315 seq_printf(m, "Global active requests: %d\n",
3316 dev_priv->gt.active_requests);
Lionel Landwerlinf577a032017-11-13 23:34:53 +00003317 seq_printf(m, "CS timestamp frequency: %u kHz\n",
3318 dev_priv->info.cs_timestamp_frequency_khz);
Chris Wilsonf73b5672017-03-02 15:03:56 +00003319
Chris Wilsonf636edb2017-10-09 12:02:57 +01003320 p = drm_seq_file_printer(m);
3321 for_each_engine(engine, dev_priv, id)
Chris Wilson0db18b12017-12-08 01:23:00 +00003322 intel_engine_dump(engine, &p, "%s\n", engine->name);
Chris Wilson1b365952016-10-04 21:11:31 +01003323
Chris Wilson9c870d02016-10-24 13:42:15 +01003324 intel_runtime_pm_put(dev_priv);
3325
Chris Wilson1b365952016-10-04 21:11:31 +01003326 return 0;
3327}
3328
Lionel Landwerlin79e9cd52018-03-06 12:28:54 +00003329static int i915_rcs_topology(struct seq_file *m, void *unused)
3330{
3331 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3332 struct drm_printer p = drm_seq_file_printer(m);
3333
3334 intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p);
3335
3336 return 0;
3337}
3338
Chris Wilsonc5418a82017-10-13 21:26:19 +01003339static int i915_shrinker_info(struct seq_file *m, void *unused)
3340{
3341 struct drm_i915_private *i915 = node_to_i915(m->private);
3342
3343 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3344 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3345
3346 return 0;
3347}
3348
/*
 * debugfs: dump the software-tracked state of each shared DPLL — which
 * CRTCs reference/use it, whether it is on, and the cached hardware
 * register values (including the ICL MG PLL set).
 */
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        int i;

        /* Hold all modeset locks so pll->state cannot change underneath us. */
        drm_modeset_lock_all(dev);
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

                seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
                           pll->info->id);
                seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
                           pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
                seq_printf(m, " tracked hardware state:\n");
                seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
                seq_printf(m, " dpll_md: 0x%08x\n",
                           pll->state.hw_state.dpll_md);
                seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
                seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
                seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
                /* cfgcr0/1 are the SKL+/CNL DPLL configuration registers. */
                seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
                seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
                /* mg_* registers belong to the ICL MG (Type-C) PLLs. */
                seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
                           pll->state.hw_state.mg_refclkin_ctl);
                seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
                           pll->state.hw_state.mg_clktop2_coreclkctl1);
                seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
                           pll->state.hw_state.mg_clktop2_hsclkctl);
                seq_printf(m, " mg_pll_div0: 0x%08x\n",
                           pll->state.hw_state.mg_pll_div0);
                seq_printf(m, " mg_pll_div1: 0x%08x\n",
                           pll->state.hw_state.mg_pll_div1);
                seq_printf(m, " mg_pll_lf: 0x%08x\n",
                           pll->state.hw_state.mg_pll_lf);
                seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
                           pll->state.hw_state.mg_pll_frac_lock);
                seq_printf(m, " mg_pll_ssc: 0x%08x\n",
                           pll->state.hw_state.mg_pll_ssc);
                seq_printf(m, " mg_pll_bias: 0x%08x\n",
                           pll->state.hw_state.mg_pll_bias);
                seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
                           pll->state.hw_state.mg_pll_tdc_coldst_bias);
        }
        drm_modeset_unlock_all(dev);

        return 0;
}
3397
Damien Lespiau1ed1ef92014-08-30 16:50:59 +01003398static int i915_wa_registers(struct seq_file *m, void *unused)
Arun Siluvery888b5992014-08-26 14:44:51 +01003399{
Chris Wilson548764b2018-06-15 13:02:07 +01003400 struct i915_workarounds *wa = &node_to_i915(m->private)->workarounds;
Chris Wilsonf4ecfbf2018-04-14 13:27:54 +01003401 int i;
Arun Siluvery888b5992014-08-26 14:44:51 +01003402
Chris Wilson548764b2018-06-15 13:02:07 +01003403 seq_printf(m, "Workarounds applied: %d\n", wa->count);
3404 for (i = 0; i < wa->count; ++i)
3405 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
3406 wa->reg[i].addr, wa->reg[i].value, wa->reg[i].mask);
Arun Siluvery888b5992014-08-26 14:44:51 +01003407
3408 return 0;
3409}
3410
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05303411static int i915_ipc_status_show(struct seq_file *m, void *data)
3412{
3413 struct drm_i915_private *dev_priv = m->private;
3414
3415 seq_printf(m, "Isochronous Priority Control: %s\n",
3416 yesno(dev_priv->ipc_enabled));
3417 return 0;
3418}
3419
3420static int i915_ipc_status_open(struct inode *inode, struct file *file)
3421{
3422 struct drm_i915_private *dev_priv = inode->i_private;
3423
3424 if (!HAS_IPC(dev_priv))
3425 return -ENODEV;
3426
3427 return single_open(file, i915_ipc_status_show, dev_priv);
3428}
3429
/*
 * debugfs write: enable/disable Isochronous Priority Control from a
 * boolean string ("0"/"1"/"y"/"n"...). Changing IPC invalidates the
 * BIOS-programmed watermarks, so force a full WM recomputation on the
 * next atomic commit.
 */
static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
                                     size_t len, loff_t *offp)
{
        struct seq_file *m = file->private_data;
        struct drm_i915_private *dev_priv = m->private;
        int ret;
        bool enable;

        ret = kstrtobool_from_user(ubuf, len, &enable);
        if (ret < 0)
                return ret;

        intel_runtime_pm_get(dev_priv);
        if (!dev_priv->ipc_enabled && enable)
                DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
        /* BIOS watermarks can no longer be trusted once IPC toggles. */
        dev_priv->wm.distrust_bios_wm = true;
        dev_priv->ipc_enabled = enable;
        intel_enable_ipc(dev_priv);
        intel_runtime_pm_put(dev_priv);

        return len;
}
3452
/* debugfs file hooks for i915_ipc_status: seq_file read plus bool write. */
static const struct file_operations i915_ipc_status_fops = {
        .owner = THIS_MODULE,
        .open = i915_ipc_status_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = i915_ipc_status_write
};
3461
/*
 * debugfs: dump the display data buffer (DDB) allocation on gen9+ —
 * per pipe, the start/end/size of each universal plane's slice plus
 * the cursor's.
 */
static int i915_ddb_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct skl_ddb_allocation *ddb;
        struct skl_ddb_entry *entry;
        enum pipe pipe;
        int plane;

        /* The DDB allocation scheme only exists on SKL and later. */
        if (INTEL_GEN(dev_priv) < 9)
                return -ENODEV;

        /* Lock out modesets so the DDB allocation cannot change mid-dump. */
        drm_modeset_lock_all(dev);

        ddb = &dev_priv->wm.skl_hw.ddb;

        seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

        for_each_pipe(dev_priv, pipe) {
                seq_printf(m, "Pipe %c\n", pipe_name(pipe));

                for_each_universal_plane(dev_priv, pipe, plane) {
                        entry = &ddb->plane[pipe][plane];
                        seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1,
                                   entry->start, entry->end,
                                   skl_ddb_entry_size(entry));
                }

                /* The cursor plane is tracked separately from the universal planes. */
                entry = &ddb->plane[pipe][PLANE_CURSOR];
                seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
                           entry->end, skl_ddb_entry_size(entry));
        }

        drm_modeset_unlock_all(dev);

        return 0;
}
3499
/*
 * Print the DRRS (Display Refresh Rate Switching) status for one CRTC:
 * the connector(s) it drives, the DRRS type reported by the VBT, and —
 * when seamless DRRS is configured — the current refresh-rate state.
 */
static void drrs_status_per_crtc(struct seq_file *m,
                                 struct drm_device *dev,
                                 struct intel_crtc *intel_crtc)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_drrs *drrs = &dev_priv->drrs;
        int vrefresh = 0;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                /* Only list connectors currently driven by this CRTC. */
                if (connector->state->crtc != &intel_crtc->base)
                        continue;

                seq_printf(m, "%s:\n", connector->name);
        }
        drm_connector_list_iter_end(&conn_iter);

        if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
                seq_puts(m, "\tVBT: DRRS_type: Static");
        else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
                seq_puts(m, "\tVBT: DRRS_type: Seamless");
        else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
                seq_puts(m, "\tVBT: DRRS_type: None");
        else
                seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

        seq_puts(m, "\n\n");

        if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
                struct intel_panel *panel;

                /* drrs->mutex protects drrs->dp and the refresh-rate state. */
                mutex_lock(&drrs->mutex);
                /* DRRS Supported */
                seq_puts(m, "\tDRRS Supported: Yes\n");

                /* disable_drrs() will make drrs->dp NULL */
                if (!drrs->dp) {
                        seq_puts(m, "Idleness DRRS: Disabled\n");
                        if (dev_priv->psr.enabled)
                                seq_puts(m,
                                "\tAs PSR is enabled, DRRS is not enabled\n");
                        mutex_unlock(&drrs->mutex);
                        return;
                }

                panel = &drrs->dp->attached_connector->panel;
                seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
                           drrs->busy_frontbuffer_bits);

                seq_puts(m, "\n\t\t");
                if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
                        seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
                        vrefresh = panel->fixed_mode->vrefresh;
                } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
                        seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
                        vrefresh = panel->downclock_mode->vrefresh;
                } else {
                        seq_printf(m, "DRRS_State: Unknown(%d)\n",
                                   drrs->refresh_rate_type);
                        mutex_unlock(&drrs->mutex);
                        return;
                }
                seq_printf(m, "\t\tVrefresh: %d", vrefresh);

                seq_puts(m, "\n\t\t");
                mutex_unlock(&drrs->mutex);
        } else {
                /* DRRS not supported. Print the VBT parameter*/
                seq_puts(m, "\tDRRS Supported : No");
        }
        seq_puts(m, "\n");
}
3574
3575static int i915_drrs_status(struct seq_file *m, void *unused)
3576{
David Weinehall36cdd012016-08-22 13:59:31 +03003577 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3578 struct drm_device *dev = &dev_priv->drm;
Vandana Kannana54746e2015-03-03 20:53:10 +05303579 struct intel_crtc *intel_crtc;
3580 int active_crtc_cnt = 0;
3581
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003582 drm_modeset_lock_all(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303583 for_each_intel_crtc(dev, intel_crtc) {
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003584 if (intel_crtc->base.state->active) {
Vandana Kannana54746e2015-03-03 20:53:10 +05303585 active_crtc_cnt++;
3586 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
3587
3588 drrs_status_per_crtc(m, dev, intel_crtc);
3589 }
Vandana Kannana54746e2015-03-03 20:53:10 +05303590 }
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003591 drm_modeset_unlock_all(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303592
3593 if (!active_crtc_cnt)
3594 seq_puts(m, "No active crtc found\n");
3595
3596 return 0;
3597}
3598
/*
 * debugfs: dump the DP MST topology below every MST-capable source port.
 */
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct intel_encoder *intel_encoder;
        struct intel_digital_port *intel_dig_port;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
                        continue;

                /* Skip virtual MST connectors; we want the real source ports. */
                intel_encoder = intel_attached_encoder(connector);
                if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
                        continue;

                intel_dig_port = enc_to_dig_port(&intel_encoder->base);
                if (!intel_dig_port->dp.can_mst)
                        continue;

                seq_printf(m, "MST Source Port %c\n",
                           port_name(intel_dig_port->base.port));
                drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
        }
        drm_connector_list_iter_end(&conn_iter);

        return 0;
}
3629
Todd Previteeb3394fa2015-04-18 00:04:19 -07003630static ssize_t i915_displayport_test_active_write(struct file *file,
David Weinehall36cdd012016-08-22 13:59:31 +03003631 const char __user *ubuf,
3632 size_t len, loff_t *offp)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003633{
3634 char *input_buffer;
3635 int status = 0;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003636 struct drm_device *dev;
3637 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003638 struct drm_connector_list_iter conn_iter;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003639 struct intel_dp *intel_dp;
3640 int val = 0;
3641
Sudip Mukherjee9aaffa32015-07-21 17:36:45 +05303642 dev = ((struct seq_file *)file->private_data)->private;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003643
Todd Previteeb3394fa2015-04-18 00:04:19 -07003644 if (len == 0)
3645 return 0;
3646
Geliang Tang261aeba2017-05-06 23:40:17 +08003647 input_buffer = memdup_user_nul(ubuf, len);
3648 if (IS_ERR(input_buffer))
3649 return PTR_ERR(input_buffer);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003650
Todd Previteeb3394fa2015-04-18 00:04:19 -07003651 DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
3652
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003653 drm_connector_list_iter_begin(dev, &conn_iter);
3654 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003655 struct intel_encoder *encoder;
3656
Todd Previteeb3394fa2015-04-18 00:04:19 -07003657 if (connector->connector_type !=
3658 DRM_MODE_CONNECTOR_DisplayPort)
3659 continue;
3660
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003661 encoder = to_intel_encoder(connector->encoder);
3662 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3663 continue;
3664
3665 if (encoder && connector->status == connector_status_connected) {
3666 intel_dp = enc_to_intel_dp(&encoder->base);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003667 status = kstrtoint(input_buffer, 10, &val);
3668 if (status < 0)
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003669 break;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003670 DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3671 /* To prevent erroneous activation of the compliance
3672 * testing code, only accept an actual value of 1 here
3673 */
3674 if (val == 1)
Manasi Navarec1617ab2016-12-09 16:22:50 -08003675 intel_dp->compliance.test_active = 1;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003676 else
Manasi Navarec1617ab2016-12-09 16:22:50 -08003677 intel_dp->compliance.test_active = 0;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003678 }
3679 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003680 drm_connector_list_iter_end(&conn_iter);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003681 kfree(input_buffer);
3682 if (status < 0)
3683 return status;
3684
3685 *offp += len;
3686 return len;
3687}
3688
3689static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3690{
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003691 struct drm_i915_private *dev_priv = m->private;
3692 struct drm_device *dev = &dev_priv->drm;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003693 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003694 struct drm_connector_list_iter conn_iter;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003695 struct intel_dp *intel_dp;
3696
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003697 drm_connector_list_iter_begin(dev, &conn_iter);
3698 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003699 struct intel_encoder *encoder;
3700
Todd Previteeb3394fa2015-04-18 00:04:19 -07003701 if (connector->connector_type !=
3702 DRM_MODE_CONNECTOR_DisplayPort)
3703 continue;
3704
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003705 encoder = to_intel_encoder(connector->encoder);
3706 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3707 continue;
3708
3709 if (encoder && connector->status == connector_status_connected) {
3710 intel_dp = enc_to_intel_dp(&encoder->base);
Manasi Navarec1617ab2016-12-09 16:22:50 -08003711 if (intel_dp->compliance.test_active)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003712 seq_puts(m, "1");
3713 else
3714 seq_puts(m, "0");
3715 } else
3716 seq_puts(m, "0");
3717 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003718 drm_connector_list_iter_end(&conn_iter);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003719
3720 return 0;
3721}
3722
/* debugfs open: hook the seq_file show, passing dev_priv (i_private) through. */
static int i915_displayport_test_active_open(struct inode *inode,
                                             struct file *file)
{
        return single_open(file, i915_displayport_test_active_show,
                           inode->i_private);
}
3729
/* debugfs file hooks for i915_displayport_test_active: seq read + arm/disarm write. */
static const struct file_operations i915_displayport_test_active_fops = {
        .owner = THIS_MODULE,
        .open = i915_displayport_test_active_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = i915_displayport_test_active_write
};
3738
/*
 * debugfs: print the DP compliance test data captured per DP connector —
 * the EDID value for EDID-read tests, or hdisplay/vdisplay/bpc for
 * video-pattern tests.
 */
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = m->private;
        struct drm_device *dev = &dev_priv->drm;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct intel_dp *intel_dp;

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                struct intel_encoder *encoder;

                if (connector->connector_type !=
                    DRM_MODE_CONNECTOR_DisplayPort)
                        continue;

                /* Skip virtual MST connectors. */
                encoder = to_intel_encoder(connector->encoder);
                if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
                        continue;

                if (encoder && connector->status == connector_status_connected) {
                        intel_dp = enc_to_intel_dp(&encoder->base);
                        if (intel_dp->compliance.test_type ==
                            DP_TEST_LINK_EDID_READ)
                                seq_printf(m, "%lx",
                                           intel_dp->compliance.test_data.edid);
                        else if (intel_dp->compliance.test_type ==
                                 DP_TEST_LINK_VIDEO_PATTERN) {
                                seq_printf(m, "hdisplay: %d\n",
                                           intel_dp->compliance.test_data.hdisplay);
                                seq_printf(m, "vdisplay: %d\n",
                                           intel_dp->compliance.test_data.vdisplay);
                                seq_printf(m, "bpc: %u\n",
                                           intel_dp->compliance.test_data.bpc);
                        }
                } else
                        seq_puts(m, "0");
        }
        drm_connector_list_iter_end(&conn_iter);

        return 0;
}
/* Generates i915_displayport_test_data_fops (read-only, single_open). */
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003782
/*
 * debugfs: print the DP compliance test type requested for each
 * connected DP connector ("0" for disconnected ones).
 */
static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = m->private;
        struct drm_device *dev = &dev_priv->drm;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct intel_dp *intel_dp;

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                struct intel_encoder *encoder;

                if (connector->connector_type !=
                    DRM_MODE_CONNECTOR_DisplayPort)
                        continue;

                /* Skip virtual MST connectors. */
                encoder = to_intel_encoder(connector->encoder);
                if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
                        continue;

                if (encoder && connector->status == connector_status_connected) {
                        intel_dp = enc_to_intel_dp(&encoder->base);
                        seq_printf(m, "%02lx", intel_dp->compliance.test_type);
                } else
                        seq_puts(m, "0");
        }
        drm_connector_list_iter_end(&conn_iter);

        return 0;
}
/* Generates i915_displayport_test_type_fops (read-only, single_open). */
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003814
/*
 * Print one watermark-latency table (up to 8 levels). The stored values
 * use different units per platform, so scale everything to tenths of a
 * microsecond before printing.
 */
static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
{
        struct drm_i915_private *dev_priv = m->private;
        struct drm_device *dev = &dev_priv->drm;
        int level;
        int num_levels;

        /* The number of valid watermark levels is platform dependent. */
        if (IS_CHERRYVIEW(dev_priv))
                num_levels = 3;
        else if (IS_VALLEYVIEW(dev_priv))
                num_levels = 1;
        else if (IS_G4X(dev_priv))
                num_levels = 3;
        else
                num_levels = ilk_wm_max_level(dev_priv) + 1;

        drm_modeset_lock_all(dev);

        for (level = 0; level < num_levels; level++) {
                unsigned int latency = wm[level];

                /*
                 * - WM1+ latency values in 0.5us units
                 * - latencies are in us on gen9/vlv/chv
                 */
                if (INTEL_GEN(dev_priv) >= 9 ||
                    IS_VALLEYVIEW(dev_priv) ||
                    IS_CHERRYVIEW(dev_priv) ||
                    IS_G4X(dev_priv))
                        latency *= 10;
                else if (level > 0)
                        latency *= 5;

                seq_printf(m, "WM%d %u (%u.%u usec)\n",
                           level, wm[level], latency / 10, latency % 10);
        }

        drm_modeset_unlock_all(dev);
}
3854
3855static int pri_wm_latency_show(struct seq_file *m, void *data)
3856{
David Weinehall36cdd012016-08-22 13:59:31 +03003857 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003858 const uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003859
David Weinehall36cdd012016-08-22 13:59:31 +03003860 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003861 latencies = dev_priv->wm.skl_latency;
3862 else
David Weinehall36cdd012016-08-22 13:59:31 +03003863 latencies = dev_priv->wm.pri_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003864
3865 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003866
3867 return 0;
3868}
3869
3870static int spr_wm_latency_show(struct seq_file *m, void *data)
3871{
David Weinehall36cdd012016-08-22 13:59:31 +03003872 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003873 const uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003874
David Weinehall36cdd012016-08-22 13:59:31 +03003875 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003876 latencies = dev_priv->wm.skl_latency;
3877 else
David Weinehall36cdd012016-08-22 13:59:31 +03003878 latencies = dev_priv->wm.spr_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003879
3880 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003881
3882 return 0;
3883}
3884
3885static int cur_wm_latency_show(struct seq_file *m, void *data)
3886{
David Weinehall36cdd012016-08-22 13:59:31 +03003887 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003888 const uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003889
David Weinehall36cdd012016-08-22 13:59:31 +03003890 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003891 latencies = dev_priv->wm.skl_latency;
3892 else
David Weinehall36cdd012016-08-22 13:59:31 +03003893 latencies = dev_priv->wm.cur_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003894
3895 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003896
3897 return 0;
3898}
3899
3900static int pri_wm_latency_open(struct inode *inode, struct file *file)
3901{
David Weinehall36cdd012016-08-22 13:59:31 +03003902 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003903
Ville Syrjälä04548cb2017-04-21 21:14:29 +03003904 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003905 return -ENODEV;
3906
David Weinehall36cdd012016-08-22 13:59:31 +03003907 return single_open(file, pri_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003908}
3909
3910static int spr_wm_latency_open(struct inode *inode, struct file *file)
3911{
David Weinehall36cdd012016-08-22 13:59:31 +03003912 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003913
David Weinehall36cdd012016-08-22 13:59:31 +03003914 if (HAS_GMCH_DISPLAY(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003915 return -ENODEV;
3916
David Weinehall36cdd012016-08-22 13:59:31 +03003917 return single_open(file, spr_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003918}
3919
3920static int cur_wm_latency_open(struct inode *inode, struct file *file)
3921{
David Weinehall36cdd012016-08-22 13:59:31 +03003922 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003923
David Weinehall36cdd012016-08-22 13:59:31 +03003924 if (HAS_GMCH_DISPLAY(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003925 return -ENODEV;
3926
David Weinehall36cdd012016-08-22 13:59:31 +03003927 return single_open(file, cur_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003928}
3929
3930static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
Damien Lespiau97e94b22014-11-04 17:06:50 +00003931 size_t len, loff_t *offp, uint16_t wm[8])
Ville Syrjälä369a1342014-01-22 14:36:08 +02003932{
3933 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003934 struct drm_i915_private *dev_priv = m->private;
3935 struct drm_device *dev = &dev_priv->drm;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003936 uint16_t new[8] = { 0 };
Ville Syrjäläde38b952015-06-24 22:00:09 +03003937 int num_levels;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003938 int level;
3939 int ret;
3940 char tmp[32];
3941
David Weinehall36cdd012016-08-22 13:59:31 +03003942 if (IS_CHERRYVIEW(dev_priv))
Ville Syrjäläde38b952015-06-24 22:00:09 +03003943 num_levels = 3;
David Weinehall36cdd012016-08-22 13:59:31 +03003944 else if (IS_VALLEYVIEW(dev_priv))
Ville Syrjäläde38b952015-06-24 22:00:09 +03003945 num_levels = 1;
Ville Syrjälä04548cb2017-04-21 21:14:29 +03003946 else if (IS_G4X(dev_priv))
3947 num_levels = 3;
Ville Syrjäläde38b952015-06-24 22:00:09 +03003948 else
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01003949 num_levels = ilk_wm_max_level(dev_priv) + 1;
Ville Syrjäläde38b952015-06-24 22:00:09 +03003950
Ville Syrjälä369a1342014-01-22 14:36:08 +02003951 if (len >= sizeof(tmp))
3952 return -EINVAL;
3953
3954 if (copy_from_user(tmp, ubuf, len))
3955 return -EFAULT;
3956
3957 tmp[len] = '\0';
3958
Damien Lespiau97e94b22014-11-04 17:06:50 +00003959 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3960 &new[0], &new[1], &new[2], &new[3],
3961 &new[4], &new[5], &new[6], &new[7]);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003962 if (ret != num_levels)
3963 return -EINVAL;
3964
3965 drm_modeset_lock_all(dev);
3966
3967 for (level = 0; level < num_levels; level++)
3968 wm[level] = new[level];
3969
3970 drm_modeset_unlock_all(dev);
3971
3972 return len;
3973}
3974
3975
3976static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3977 size_t len, loff_t *offp)
3978{
3979 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003980 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003981 uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003982
David Weinehall36cdd012016-08-22 13:59:31 +03003983 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003984 latencies = dev_priv->wm.skl_latency;
3985 else
David Weinehall36cdd012016-08-22 13:59:31 +03003986 latencies = dev_priv->wm.pri_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003987
3988 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003989}
3990
3991static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3992 size_t len, loff_t *offp)
3993{
3994 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003995 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003996 uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003997
David Weinehall36cdd012016-08-22 13:59:31 +03003998 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003999 latencies = dev_priv->wm.skl_latency;
4000 else
David Weinehall36cdd012016-08-22 13:59:31 +03004001 latencies = dev_priv->wm.spr_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00004002
4003 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02004004}
4005
4006static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
4007 size_t len, loff_t *offp)
4008{
4009 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03004010 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00004011 uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02004012
David Weinehall36cdd012016-08-22 13:59:31 +03004013 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00004014 latencies = dev_priv->wm.skl_latency;
4015 else
David Weinehall36cdd012016-08-22 13:59:31 +03004016 latencies = dev_priv->wm.cur_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00004017
4018 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02004019}
4020
4021static const struct file_operations i915_pri_wm_latency_fops = {
4022 .owner = THIS_MODULE,
4023 .open = pri_wm_latency_open,
4024 .read = seq_read,
4025 .llseek = seq_lseek,
4026 .release = single_release,
4027 .write = pri_wm_latency_write
4028};
4029
4030static const struct file_operations i915_spr_wm_latency_fops = {
4031 .owner = THIS_MODULE,
4032 .open = spr_wm_latency_open,
4033 .read = seq_read,
4034 .llseek = seq_lseek,
4035 .release = single_release,
4036 .write = spr_wm_latency_write
4037};
4038
4039static const struct file_operations i915_cur_wm_latency_fops = {
4040 .owner = THIS_MODULE,
4041 .open = cur_wm_latency_open,
4042 .read = seq_read,
4043 .llseek = seq_lseek,
4044 .release = single_release,
4045 .write = cur_wm_latency_write
4046};
4047
Kees Cook647416f2013-03-10 14:10:06 -07004048static int
4049i915_wedged_get(void *data, u64 *val)
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004050{
David Weinehall36cdd012016-08-22 13:59:31 +03004051 struct drm_i915_private *dev_priv = data;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004052
Chris Wilsond98c52c2016-04-13 17:35:05 +01004053 *val = i915_terminally_wedged(&dev_priv->gpu_error);
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004054
Kees Cook647416f2013-03-10 14:10:06 -07004055 return 0;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004056}
4057
Kees Cook647416f2013-03-10 14:10:06 -07004058static int
4059i915_wedged_set(void *data, u64 val)
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004060{
Chris Wilson598b6b52017-03-25 13:47:35 +00004061 struct drm_i915_private *i915 = data;
4062 struct intel_engine_cs *engine;
4063 unsigned int tmp;
Imre Deakd46c0512014-04-14 20:24:27 +03004064
Mika Kuoppalab8d24a02015-01-28 17:03:14 +02004065 /*
4066 * There is no safeguard against this debugfs entry colliding
4067 * with the hangcheck calling same i915_handle_error() in
4068 * parallel, causing an explosion. For now we assume that the
4069 * test harness is responsible enough not to inject gpu hangs
4070 * while it is writing to 'i915_wedged'
4071 */
4072
Chris Wilson598b6b52017-03-25 13:47:35 +00004073 if (i915_reset_backoff(&i915->gpu_error))
Mika Kuoppalab8d24a02015-01-28 17:03:14 +02004074 return -EAGAIN;
4075
Chris Wilson598b6b52017-03-25 13:47:35 +00004076 for_each_engine_masked(engine, i915, val, tmp) {
4077 engine->hangcheck.seqno = intel_engine_get_seqno(engine);
4078 engine->hangcheck.stalled = true;
4079 }
Imre Deakd46c0512014-04-14 20:24:27 +03004080
Chris Wilsonce800752018-03-20 10:04:49 +00004081 i915_handle_error(i915, val, I915_ERROR_CAPTURE,
4082 "Manually set wedged engine mask = %llx", val);
Chris Wilson598b6b52017-03-25 13:47:35 +00004083
4084 wait_on_bit(&i915->gpu_error.flags,
Chris Wilsond3df42b2017-03-16 17:13:05 +00004085 I915_RESET_HANDOFF,
4086 TASK_UNINTERRUPTIBLE);
4087
Kees Cook647416f2013-03-10 14:10:06 -07004088 return 0;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004089}
4090
Kees Cook647416f2013-03-10 14:10:06 -07004091DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
4092 i915_wedged_get, i915_wedged_set,
Mika Kuoppala3a3b4f92013-04-12 12:10:05 +03004093 "%llu\n");
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004094
Kees Cook647416f2013-03-10 14:10:06 -07004095static int
Chris Wilson64486ae2017-03-07 15:59:08 +00004096fault_irq_set(struct drm_i915_private *i915,
4097 unsigned long *irq,
4098 unsigned long val)
4099{
4100 int err;
4101
4102 err = mutex_lock_interruptible(&i915->drm.struct_mutex);
4103 if (err)
4104 return err;
4105
4106 err = i915_gem_wait_for_idle(i915,
4107 I915_WAIT_LOCKED |
4108 I915_WAIT_INTERRUPTIBLE);
4109 if (err)
4110 goto err_unlock;
4111
Chris Wilson64486ae2017-03-07 15:59:08 +00004112 *irq = val;
4113 mutex_unlock(&i915->drm.struct_mutex);
4114
4115 /* Flush idle worker to disarm irq */
Chris Wilson7c262402017-10-06 11:40:38 +01004116 drain_delayed_work(&i915->gt.idle_work);
Chris Wilson64486ae2017-03-07 15:59:08 +00004117
4118 return 0;
4119
4120err_unlock:
4121 mutex_unlock(&i915->drm.struct_mutex);
4122 return err;
4123}
4124
4125static int
Chris Wilson094f9a52013-09-25 17:34:55 +01004126i915_ring_missed_irq_get(void *data, u64 *val)
4127{
David Weinehall36cdd012016-08-22 13:59:31 +03004128 struct drm_i915_private *dev_priv = data;
Chris Wilson094f9a52013-09-25 17:34:55 +01004129
4130 *val = dev_priv->gpu_error.missed_irq_rings;
4131 return 0;
4132}
4133
4134static int
4135i915_ring_missed_irq_set(void *data, u64 val)
4136{
Chris Wilson64486ae2017-03-07 15:59:08 +00004137 struct drm_i915_private *i915 = data;
Chris Wilson094f9a52013-09-25 17:34:55 +01004138
Chris Wilson64486ae2017-03-07 15:59:08 +00004139 return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
Chris Wilson094f9a52013-09-25 17:34:55 +01004140}
4141
4142DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
4143 i915_ring_missed_irq_get, i915_ring_missed_irq_set,
4144 "0x%08llx\n");
4145
4146static int
4147i915_ring_test_irq_get(void *data, u64 *val)
4148{
David Weinehall36cdd012016-08-22 13:59:31 +03004149 struct drm_i915_private *dev_priv = data;
Chris Wilson094f9a52013-09-25 17:34:55 +01004150
4151 *val = dev_priv->gpu_error.test_irq_rings;
4152
4153 return 0;
4154}
4155
4156static int
4157i915_ring_test_irq_set(void *data, u64 val)
4158{
Chris Wilson64486ae2017-03-07 15:59:08 +00004159 struct drm_i915_private *i915 = data;
Chris Wilson094f9a52013-09-25 17:34:55 +01004160
Chris Wilson64486ae2017-03-07 15:59:08 +00004161 val &= INTEL_INFO(i915)->ring_mask;
Chris Wilson094f9a52013-09-25 17:34:55 +01004162 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
Chris Wilson094f9a52013-09-25 17:34:55 +01004163
Chris Wilson64486ae2017-03-07 15:59:08 +00004164 return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
Chris Wilson094f9a52013-09-25 17:34:55 +01004165}
4166
4167DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
4168 i915_ring_test_irq_get, i915_ring_test_irq_set,
4169 "0x%08llx\n");
4170
Chris Wilsonb4a0b322017-10-18 13:16:21 +01004171#define DROP_UNBOUND BIT(0)
4172#define DROP_BOUND BIT(1)
4173#define DROP_RETIRE BIT(2)
4174#define DROP_ACTIVE BIT(3)
4175#define DROP_FREED BIT(4)
4176#define DROP_SHRINK_ALL BIT(5)
4177#define DROP_IDLE BIT(6)
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004178#define DROP_ALL (DROP_UNBOUND | \
4179 DROP_BOUND | \
4180 DROP_RETIRE | \
4181 DROP_ACTIVE | \
Chris Wilson8eadc192017-03-08 14:46:22 +00004182 DROP_FREED | \
Chris Wilsonb4a0b322017-10-18 13:16:21 +01004183 DROP_SHRINK_ALL |\
4184 DROP_IDLE)
Kees Cook647416f2013-03-10 14:10:06 -07004185static int
4186i915_drop_caches_get(void *data, u64 *val)
Chris Wilsondd624af2013-01-15 12:39:35 +00004187{
Kees Cook647416f2013-03-10 14:10:06 -07004188 *val = DROP_ALL;
Chris Wilsondd624af2013-01-15 12:39:35 +00004189
Kees Cook647416f2013-03-10 14:10:06 -07004190 return 0;
Chris Wilsondd624af2013-01-15 12:39:35 +00004191}
4192
Kees Cook647416f2013-03-10 14:10:06 -07004193static int
4194i915_drop_caches_set(void *data, u64 val)
Chris Wilsondd624af2013-01-15 12:39:35 +00004195{
David Weinehall36cdd012016-08-22 13:59:31 +03004196 struct drm_i915_private *dev_priv = data;
4197 struct drm_device *dev = &dev_priv->drm;
Chris Wilson00c26cf2017-05-24 17:26:53 +01004198 int ret = 0;
Chris Wilsondd624af2013-01-15 12:39:35 +00004199
Chris Wilsonb4a0b322017-10-18 13:16:21 +01004200 DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
4201 val, val & DROP_ALL);
Chris Wilsondd624af2013-01-15 12:39:35 +00004202
4203 /* No need to check and wait for gpu resets, only libdrm auto-restarts
4204 * on ioctls on -EAGAIN. */
Chris Wilson00c26cf2017-05-24 17:26:53 +01004205 if (val & (DROP_ACTIVE | DROP_RETIRE)) {
4206 ret = mutex_lock_interruptible(&dev->struct_mutex);
Chris Wilsondd624af2013-01-15 12:39:35 +00004207 if (ret)
Chris Wilson00c26cf2017-05-24 17:26:53 +01004208 return ret;
Chris Wilsondd624af2013-01-15 12:39:35 +00004209
Chris Wilson00c26cf2017-05-24 17:26:53 +01004210 if (val & DROP_ACTIVE)
4211 ret = i915_gem_wait_for_idle(dev_priv,
4212 I915_WAIT_INTERRUPTIBLE |
4213 I915_WAIT_LOCKED);
4214
4215 if (val & DROP_RETIRE)
Chris Wilsone61e0f52018-02-21 09:56:36 +00004216 i915_retire_requests(dev_priv);
Chris Wilson00c26cf2017-05-24 17:26:53 +01004217
4218 mutex_unlock(&dev->struct_mutex);
4219 }
Chris Wilsondd624af2013-01-15 12:39:35 +00004220
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01004221 fs_reclaim_acquire(GFP_KERNEL);
Chris Wilson21ab4e72014-09-09 11:16:08 +01004222 if (val & DROP_BOUND)
Chris Wilson912d5722017-09-06 16:19:30 -07004223 i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_BOUND);
Chris Wilson4ad72b72014-09-03 19:23:37 +01004224
Chris Wilson21ab4e72014-09-09 11:16:08 +01004225 if (val & DROP_UNBOUND)
Chris Wilson912d5722017-09-06 16:19:30 -07004226 i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
Chris Wilsondd624af2013-01-15 12:39:35 +00004227
Chris Wilson8eadc192017-03-08 14:46:22 +00004228 if (val & DROP_SHRINK_ALL)
4229 i915_gem_shrink_all(dev_priv);
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01004230 fs_reclaim_release(GFP_KERNEL);
Chris Wilson8eadc192017-03-08 14:46:22 +00004231
Chris Wilson4dfacb02018-05-31 09:22:43 +01004232 if (val & DROP_IDLE) {
4233 do {
4234 if (READ_ONCE(dev_priv->gt.active_requests))
4235 flush_delayed_work(&dev_priv->gt.retire_work);
4236 drain_delayed_work(&dev_priv->gt.idle_work);
4237 } while (READ_ONCE(dev_priv->gt.awake));
4238 }
Chris Wilsonb4a0b322017-10-18 13:16:21 +01004239
Chris Wilsonc9c704712018-02-19 22:06:31 +00004240 if (val & DROP_FREED)
Chris Wilsonbdeb9782016-12-23 14:57:56 +00004241 i915_gem_drain_freed_objects(dev_priv);
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01004242
Kees Cook647416f2013-03-10 14:10:06 -07004243 return ret;
Chris Wilsondd624af2013-01-15 12:39:35 +00004244}
4245
Kees Cook647416f2013-03-10 14:10:06 -07004246DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
4247 i915_drop_caches_get, i915_drop_caches_set,
4248 "0x%08llx\n");
Chris Wilsondd624af2013-01-15 12:39:35 +00004249
Kees Cook647416f2013-03-10 14:10:06 -07004250static int
Kees Cook647416f2013-03-10 14:10:06 -07004251i915_cache_sharing_get(void *data, u64 *val)
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004252{
David Weinehall36cdd012016-08-22 13:59:31 +03004253 struct drm_i915_private *dev_priv = data;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004254 u32 snpcr;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004255
David Weinehall36cdd012016-08-22 13:59:31 +03004256 if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
Daniel Vetter004777c2012-08-09 15:07:01 +02004257 return -ENODEV;
4258
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004259 intel_runtime_pm_get(dev_priv);
Daniel Vetter22bcfc62012-08-09 15:07:02 +02004260
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004261 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004262
4263 intel_runtime_pm_put(dev_priv);
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004264
Kees Cook647416f2013-03-10 14:10:06 -07004265 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004266
Kees Cook647416f2013-03-10 14:10:06 -07004267 return 0;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004268}
4269
Kees Cook647416f2013-03-10 14:10:06 -07004270static int
4271i915_cache_sharing_set(void *data, u64 val)
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004272{
David Weinehall36cdd012016-08-22 13:59:31 +03004273 struct drm_i915_private *dev_priv = data;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004274 u32 snpcr;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004275
David Weinehall36cdd012016-08-22 13:59:31 +03004276 if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
Daniel Vetter004777c2012-08-09 15:07:01 +02004277 return -ENODEV;
4278
Kees Cook647416f2013-03-10 14:10:06 -07004279 if (val > 3)
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004280 return -EINVAL;
4281
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004282 intel_runtime_pm_get(dev_priv);
Kees Cook647416f2013-03-10 14:10:06 -07004283 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004284
4285 /* Update the cache sharing policy here as well */
4286 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4287 snpcr &= ~GEN6_MBC_SNPCR_MASK;
4288 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
4289 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
4290
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02004291 intel_runtime_pm_put(dev_priv);
Kees Cook647416f2013-03-10 14:10:06 -07004292 return 0;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004293}
4294
Kees Cook647416f2013-03-10 14:10:06 -07004295DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
4296 i915_cache_sharing_get, i915_cache_sharing_set,
4297 "%llu\n");
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004298
David Weinehall36cdd012016-08-22 13:59:31 +03004299static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
Imre Deak915490d2016-08-31 19:13:01 +03004300 struct sseu_dev_info *sseu)
Jeff McGee5d395252015-04-03 18:13:17 -07004301{
Chris Wilson7aa0b142018-03-13 00:40:54 +00004302#define SS_MAX 2
4303 const int ss_max = SS_MAX;
4304 u32 sig1[SS_MAX], sig2[SS_MAX];
Jeff McGee5d395252015-04-03 18:13:17 -07004305 int ss;
Jeff McGee5d395252015-04-03 18:13:17 -07004306
4307 sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
4308 sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
4309 sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
4310 sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
4311
4312 for (ss = 0; ss < ss_max; ss++) {
4313 unsigned int eu_cnt;
4314
4315 if (sig1[ss] & CHV_SS_PG_ENABLE)
4316 /* skip disabled subslice */
4317 continue;
4318
Imre Deakf08a0c92016-08-31 19:13:04 +03004319 sseu->slice_mask = BIT(0);
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004320 sseu->subslice_mask[0] |= BIT(ss);
Jeff McGee5d395252015-04-03 18:13:17 -07004321 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
4322 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
4323 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
4324 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
Imre Deak915490d2016-08-31 19:13:01 +03004325 sseu->eu_total += eu_cnt;
4326 sseu->eu_per_subslice = max_t(unsigned int,
4327 sseu->eu_per_subslice, eu_cnt);
Jeff McGee5d395252015-04-03 18:13:17 -07004328 }
Chris Wilson7aa0b142018-03-13 00:40:54 +00004329#undef SS_MAX
Jeff McGee5d395252015-04-03 18:13:17 -07004330}
4331
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004332static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
4333 struct sseu_dev_info *sseu)
4334{
Chris Wilsonc7fb3c62018-03-13 11:31:49 +00004335#define SS_MAX 6
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004336 const struct intel_device_info *info = INTEL_INFO(dev_priv);
Chris Wilsonc7fb3c62018-03-13 11:31:49 +00004337 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004338 int s, ss;
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004339
Lionel Landwerlinb3e7f862018-03-06 12:28:53 +00004340 for (s = 0; s < info->sseu.max_slices; s++) {
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004341 /*
4342 * FIXME: Valid SS Mask respects the spec and read
4343 * only valid bits for those registers, excluding reserverd
4344 * although this seems wrong because it would leave many
4345 * subslices without ACK.
4346 */
4347 s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
4348 GEN10_PGCTL_VALID_SS_MASK(s);
4349 eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
4350 eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
4351 }
4352
4353 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4354 GEN9_PGCTL_SSA_EU19_ACK |
4355 GEN9_PGCTL_SSA_EU210_ACK |
4356 GEN9_PGCTL_SSA_EU311_ACK;
4357 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4358 GEN9_PGCTL_SSB_EU19_ACK |
4359 GEN9_PGCTL_SSB_EU210_ACK |
4360 GEN9_PGCTL_SSB_EU311_ACK;
4361
Lionel Landwerlinb3e7f862018-03-06 12:28:53 +00004362 for (s = 0; s < info->sseu.max_slices; s++) {
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004363 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4364 /* skip disabled slice */
4365 continue;
4366
4367 sseu->slice_mask |= BIT(s);
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004368 sseu->subslice_mask[s] = info->sseu.subslice_mask[s];
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004369
Lionel Landwerlinb3e7f862018-03-06 12:28:53 +00004370 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004371 unsigned int eu_cnt;
4372
4373 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4374 /* skip disabled subslice */
4375 continue;
4376
4377 eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
4378 eu_mask[ss % 2]);
4379 sseu->eu_total += eu_cnt;
4380 sseu->eu_per_subslice = max_t(unsigned int,
4381 sseu->eu_per_subslice,
4382 eu_cnt);
4383 }
4384 }
Chris Wilsonc7fb3c62018-03-13 11:31:49 +00004385#undef SS_MAX
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004386}
4387
David Weinehall36cdd012016-08-22 13:59:31 +03004388static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
Imre Deak915490d2016-08-31 19:13:01 +03004389 struct sseu_dev_info *sseu)
Jeff McGee5d395252015-04-03 18:13:17 -07004390{
Chris Wilsonc7fb3c62018-03-13 11:31:49 +00004391#define SS_MAX 3
Lionel Landwerlinb3e7f862018-03-06 12:28:53 +00004392 const struct intel_device_info *info = INTEL_INFO(dev_priv);
Chris Wilsonc7fb3c62018-03-13 11:31:49 +00004393 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
Jeff McGee5d395252015-04-03 18:13:17 -07004394 int s, ss;
Jeff McGee5d395252015-04-03 18:13:17 -07004395
Lionel Landwerlinb3e7f862018-03-06 12:28:53 +00004396 for (s = 0; s < info->sseu.max_slices; s++) {
Jeff McGee1c046bc2015-04-03 18:13:18 -07004397 s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
4398 eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
4399 eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
4400 }
4401
Jeff McGee5d395252015-04-03 18:13:17 -07004402 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4403 GEN9_PGCTL_SSA_EU19_ACK |
4404 GEN9_PGCTL_SSA_EU210_ACK |
4405 GEN9_PGCTL_SSA_EU311_ACK;
4406 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4407 GEN9_PGCTL_SSB_EU19_ACK |
4408 GEN9_PGCTL_SSB_EU210_ACK |
4409 GEN9_PGCTL_SSB_EU311_ACK;
4410
Lionel Landwerlinb3e7f862018-03-06 12:28:53 +00004411 for (s = 0; s < info->sseu.max_slices; s++) {
Jeff McGee5d395252015-04-03 18:13:17 -07004412 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4413 /* skip disabled slice */
4414 continue;
4415
Imre Deakf08a0c92016-08-31 19:13:04 +03004416 sseu->slice_mask |= BIT(s);
Jeff McGee1c046bc2015-04-03 18:13:18 -07004417
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004418 if (IS_GEN9_BC(dev_priv))
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004419 sseu->subslice_mask[s] =
4420 INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
Jeff McGee1c046bc2015-04-03 18:13:18 -07004421
Lionel Landwerlinb3e7f862018-03-06 12:28:53 +00004422 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
Jeff McGee5d395252015-04-03 18:13:17 -07004423 unsigned int eu_cnt;
4424
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02004425 if (IS_GEN9_LP(dev_priv)) {
Imre Deak57ec1712016-08-31 19:13:05 +03004426 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4427 /* skip disabled subslice */
4428 continue;
Jeff McGee1c046bc2015-04-03 18:13:18 -07004429
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004430 sseu->subslice_mask[s] |= BIT(ss);
Imre Deak57ec1712016-08-31 19:13:05 +03004431 }
Jeff McGee1c046bc2015-04-03 18:13:18 -07004432
Jeff McGee5d395252015-04-03 18:13:17 -07004433 eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
4434 eu_mask[ss%2]);
Imre Deak915490d2016-08-31 19:13:01 +03004435 sseu->eu_total += eu_cnt;
4436 sseu->eu_per_subslice = max_t(unsigned int,
4437 sseu->eu_per_subslice,
4438 eu_cnt);
Jeff McGee5d395252015-04-03 18:13:17 -07004439 }
4440 }
Chris Wilsonc7fb3c62018-03-13 11:31:49 +00004441#undef SS_MAX
Jeff McGee5d395252015-04-03 18:13:17 -07004442}
4443
David Weinehall36cdd012016-08-22 13:59:31 +03004444static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
Imre Deak915490d2016-08-31 19:13:01 +03004445 struct sseu_dev_info *sseu)
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004446{
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004447 u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
David Weinehall36cdd012016-08-22 13:59:31 +03004448 int s;
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004449
Imre Deakf08a0c92016-08-31 19:13:04 +03004450 sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004451
Imre Deakf08a0c92016-08-31 19:13:04 +03004452 if (sseu->slice_mask) {
Imre Deak43b67992016-08-31 19:13:02 +03004453 sseu->eu_per_subslice =
4454 INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004455 for (s = 0; s < fls(sseu->slice_mask); s++) {
4456 sseu->subslice_mask[s] =
4457 INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
4458 }
Imre Deak57ec1712016-08-31 19:13:05 +03004459 sseu->eu_total = sseu->eu_per_subslice *
4460 sseu_subslice_total(sseu);
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004461
4462 /* subtract fused off EU(s) from enabled slice(s) */
Imre Deak795b38b2016-08-31 19:13:07 +03004463 for (s = 0; s < fls(sseu->slice_mask); s++) {
Imre Deak43b67992016-08-31 19:13:02 +03004464 u8 subslice_7eu =
4465 INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004466
Imre Deak915490d2016-08-31 19:13:01 +03004467 sseu->eu_total -= hweight8(subslice_7eu);
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02004468 }
4469 }
4470}
4471
Imre Deak615d8902016-08-31 19:13:03 +03004472static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
4473 const struct sseu_dev_info *sseu)
4474{
4475 struct drm_i915_private *dev_priv = node_to_i915(m->private);
4476 const char *type = is_available_info ? "Available" : "Enabled";
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004477 int s;
Imre Deak615d8902016-08-31 19:13:03 +03004478
Imre Deakc67ba532016-08-31 19:13:06 +03004479 seq_printf(m, " %s Slice Mask: %04x\n", type,
4480 sseu->slice_mask);
Imre Deak615d8902016-08-31 19:13:03 +03004481 seq_printf(m, " %s Slice Total: %u\n", type,
Imre Deakf08a0c92016-08-31 19:13:04 +03004482 hweight8(sseu->slice_mask));
Imre Deak615d8902016-08-31 19:13:03 +03004483 seq_printf(m, " %s Subslice Total: %u\n", type,
Imre Deak57ec1712016-08-31 19:13:05 +03004484 sseu_subslice_total(sseu));
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004485 for (s = 0; s < fls(sseu->slice_mask); s++) {
4486 seq_printf(m, " %s Slice%i subslices: %u\n", type,
4487 s, hweight8(sseu->subslice_mask[s]));
4488 }
Imre Deak615d8902016-08-31 19:13:03 +03004489 seq_printf(m, " %s EU Total: %u\n", type,
4490 sseu->eu_total);
4491 seq_printf(m, " %s EU Per Subslice: %u\n", type,
4492 sseu->eu_per_subslice);
4493
4494 if (!is_available_info)
4495 return;
4496
4497 seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
4498 if (HAS_POOLED_EU(dev_priv))
4499 seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);
4500
4501 seq_printf(m, " Has Slice Power Gating: %s\n",
4502 yesno(sseu->has_slice_pg));
4503 seq_printf(m, " Has Subslice Power Gating: %s\n",
4504 yesno(sseu->has_subslice_pg));
4505 seq_printf(m, " Has EU Power Gating: %s\n",
4506 yesno(sseu->has_eu_pg));
4507}
4508
Jeff McGee38732182015-02-13 10:27:54 -06004509static int i915_sseu_status(struct seq_file *m, void *unused)
4510{
David Weinehall36cdd012016-08-22 13:59:31 +03004511 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Imre Deak915490d2016-08-31 19:13:01 +03004512 struct sseu_dev_info sseu;
Jeff McGee38732182015-02-13 10:27:54 -06004513
David Weinehall36cdd012016-08-22 13:59:31 +03004514 if (INTEL_GEN(dev_priv) < 8)
Jeff McGee38732182015-02-13 10:27:54 -06004515 return -ENODEV;
4516
4517 seq_puts(m, "SSEU Device Info\n");
Imre Deak615d8902016-08-31 19:13:03 +03004518 i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);
Jeff McGee38732182015-02-13 10:27:54 -06004519
Jeff McGee7f992ab2015-02-13 10:27:55 -06004520 seq_puts(m, "SSEU Device Status\n");
Imre Deak915490d2016-08-31 19:13:01 +03004521 memset(&sseu, 0, sizeof(sseu));
Lionel Landwerlin8cc76692018-03-06 12:28:52 +00004522 sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices;
4523 sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices;
4524 sseu.max_eus_per_subslice =
4525 INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice;
David Weinehall238010e2016-08-01 17:33:27 +03004526
4527 intel_runtime_pm_get(dev_priv);
4528
David Weinehall36cdd012016-08-22 13:59:31 +03004529 if (IS_CHERRYVIEW(dev_priv)) {
Imre Deak915490d2016-08-31 19:13:01 +03004530 cherryview_sseu_device_status(dev_priv, &sseu);
David Weinehall36cdd012016-08-22 13:59:31 +03004531 } else if (IS_BROADWELL(dev_priv)) {
Imre Deak915490d2016-08-31 19:13:01 +03004532 broadwell_sseu_device_status(dev_priv, &sseu);
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004533 } else if (IS_GEN9(dev_priv)) {
Imre Deak915490d2016-08-31 19:13:01 +03004534 gen9_sseu_device_status(dev_priv, &sseu);
Rodrigo Vivif8c3dcf2017-10-25 17:15:46 -07004535 } else if (INTEL_GEN(dev_priv) >= 10) {
4536 gen10_sseu_device_status(dev_priv, &sseu);
Jeff McGee7f992ab2015-02-13 10:27:55 -06004537 }
David Weinehall238010e2016-08-01 17:33:27 +03004538
4539 intel_runtime_pm_put(dev_priv);
4540
Imre Deak615d8902016-08-31 19:13:03 +03004541 i915_print_sseu_info(m, false, &sseu);
Jeff McGee7f992ab2015-02-13 10:27:55 -06004542
Jeff McGee38732182015-02-13 10:27:54 -06004543 return 0;
4544}
4545
Ben Widawsky6d794d42011-04-25 11:25:56 -07004546static int i915_forcewake_open(struct inode *inode, struct file *file)
4547{
Chris Wilsond7a133d2017-09-07 14:44:41 +01004548 struct drm_i915_private *i915 = inode->i_private;
Ben Widawsky6d794d42011-04-25 11:25:56 -07004549
Chris Wilsond7a133d2017-09-07 14:44:41 +01004550 if (INTEL_GEN(i915) < 6)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004551 return 0;
4552
Chris Wilsond7a133d2017-09-07 14:44:41 +01004553 intel_runtime_pm_get(i915);
4554 intel_uncore_forcewake_user_get(i915);
Ben Widawsky6d794d42011-04-25 11:25:56 -07004555
4556 return 0;
4557}
4558
Ben Widawskyc43b5632012-04-16 14:07:40 -07004559static int i915_forcewake_release(struct inode *inode, struct file *file)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004560{
Chris Wilsond7a133d2017-09-07 14:44:41 +01004561 struct drm_i915_private *i915 = inode->i_private;
Ben Widawsky6d794d42011-04-25 11:25:56 -07004562
Chris Wilsond7a133d2017-09-07 14:44:41 +01004563 if (INTEL_GEN(i915) < 6)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004564 return 0;
4565
Chris Wilsond7a133d2017-09-07 14:44:41 +01004566 intel_uncore_forcewake_user_put(i915);
4567 intel_runtime_pm_put(i915);
Ben Widawsky6d794d42011-04-25 11:25:56 -07004568
4569 return 0;
4570}
4571
/* i915_forcewake_user: the wakeref/forcewake refs live for the open-file's lifetime. */
static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
4577
Lyude317eaa92017-02-03 21:18:25 -05004578static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4579{
4580 struct drm_i915_private *dev_priv = m->private;
4581 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4582
4583 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4584 seq_printf(m, "Detected: %s\n",
4585 yesno(delayed_work_pending(&hotplug->reenable_work)));
4586
4587 return 0;
4588}
4589
/*
 * Update the HPD storm detection threshold from userspace.
 *
 * Input is either a decimal threshold, or the literal string "reset"
 * to restore HPD_STORM_DEFAULT_THRESHOLD.  A threshold of 0 disables
 * storm detection.  The per-pin counters are cleared under irq_lock so
 * the new threshold starts from a clean slate, and any pending storm
 * recovery is flushed so HPD is re-enabled immediately.
 */
static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	/* Reject input that can't fit in tmp along with its terminator. */
	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
			      new_threshold);
	else
		DRM_DEBUG_KMS("Disabling HPD storm detection\n");

	/* irq_lock protects the threshold and stats against the HPD irq path. */
	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}
4638
4639static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
4640{
4641 return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
4642}
4643
/* i915_hpd_storm_ctl: seq_file read of current state, plus a write hook. */
static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
4652
C, Ramalingam35954e82017-11-08 00:08:23 +05304653static int i915_drrs_ctl_set(void *data, u64 val)
4654{
4655 struct drm_i915_private *dev_priv = data;
4656 struct drm_device *dev = &dev_priv->drm;
4657 struct intel_crtc *intel_crtc;
4658 struct intel_encoder *encoder;
4659 struct intel_dp *intel_dp;
4660
4661 if (INTEL_GEN(dev_priv) < 7)
4662 return -ENODEV;
4663
4664 drm_modeset_lock_all(dev);
4665 for_each_intel_crtc(dev, intel_crtc) {
4666 if (!intel_crtc->base.state->active ||
4667 !intel_crtc->config->has_drrs)
4668 continue;
4669
4670 for_each_encoder_on_crtc(dev, &intel_crtc->base, encoder) {
4671 if (encoder->type != INTEL_OUTPUT_EDP)
4672 continue;
4673
4674 DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
4675 val ? "en" : "dis", val);
4676
4677 intel_dp = enc_to_intel_dp(&encoder->base);
4678 if (val)
4679 intel_edp_drrs_enable(intel_dp,
4680 intel_crtc->config);
4681 else
4682 intel_edp_drrs_disable(intel_dp,
4683 intel_crtc->config);
4684 }
4685 }
4686 drm_modeset_unlock_all(dev);
4687
4688 return 0;
4689}
4690
4691DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4692
Maarten Lankhorstd52ad9c2018-03-28 12:05:26 +02004693static ssize_t
4694i915_fifo_underrun_reset_write(struct file *filp,
4695 const char __user *ubuf,
4696 size_t cnt, loff_t *ppos)
4697{
4698 struct drm_i915_private *dev_priv = filp->private_data;
4699 struct intel_crtc *intel_crtc;
4700 struct drm_device *dev = &dev_priv->drm;
4701 int ret;
4702 bool reset;
4703
4704 ret = kstrtobool_from_user(ubuf, cnt, &reset);
4705 if (ret)
4706 return ret;
4707
4708 if (!reset)
4709 return cnt;
4710
4711 for_each_intel_crtc(dev, intel_crtc) {
4712 struct drm_crtc_commit *commit;
4713 struct intel_crtc_state *crtc_state;
4714
4715 ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
4716 if (ret)
4717 return ret;
4718
4719 crtc_state = to_intel_crtc_state(intel_crtc->base.state);
4720 commit = crtc_state->base.commit;
4721 if (commit) {
4722 ret = wait_for_completion_interruptible(&commit->hw_done);
4723 if (!ret)
4724 ret = wait_for_completion_interruptible(&commit->flip_done);
4725 }
4726
4727 if (!ret && crtc_state->base.active) {
4728 DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
4729 pipe_name(intel_crtc->pipe));
4730
4731 intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
4732 }
4733
4734 drm_modeset_unlock(&intel_crtc->base.mutex);
4735
4736 if (ret)
4737 return ret;
4738 }
4739
4740 ret = intel_fbc_reset_underrun(dev_priv);
4741 if (ret)
4742 return ret;
4743
4744 return cnt;
4745}
4746
/* i915_fifo_underrun_reset: write-only trigger, no read side. */
static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};
4753
Lespiau, Damien06c5bf82013-10-17 19:09:56 +01004754static const struct drm_info_list i915_debugfs_list[] = {
Chris Wilson311bd682011-01-13 19:06:50 +00004755 {"i915_capabilities", i915_capabilities, 0},
Chris Wilson73aa8082010-09-30 11:46:12 +01004756 {"i915_gem_objects", i915_gem_object_info, 0},
Chris Wilson08c18322011-01-10 00:00:24 +00004757 {"i915_gem_gtt", i915_gem_gtt_info, 0},
Chris Wilson6d2b88852013-08-07 18:30:54 +01004758 {"i915_gem_stolen", i915_gem_stolen_list_info },
Chris Wilsona6172a82009-02-11 14:26:38 +00004759 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
Ben Gamari20172632009-02-17 20:08:50 -05004760 {"i915_gem_interrupt", i915_interrupt_info, 0},
Brad Volkin493018d2014-12-11 12:13:08 -08004761 {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
Dave Gordon8b417c22015-08-12 15:43:44 +01004762 {"i915_guc_info", i915_guc_info, 0},
Alex Daifdf5d352015-08-12 15:43:37 +01004763 {"i915_guc_load_status", i915_guc_load_status_info, 0},
Alex Dai4c7e77f2015-08-12 15:43:40 +01004764 {"i915_guc_log_dump", i915_guc_log_dump, 0},
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07004765 {"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
Oscar Mateoa8b93702017-05-10 15:04:51 +00004766 {"i915_guc_stage_pool", i915_guc_stage_pool, 0},
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08004767 {"i915_huc_load_status", i915_huc_load_status_info, 0},
Deepak Sadb4bd12014-03-31 11:30:02 +05304768 {"i915_frequency_info", i915_frequency_info, 0},
Chris Wilsonf6544492015-01-26 18:03:04 +02004769 {"i915_hangcheck_info", i915_hangcheck_info, 0},
Michel Thierry061d06a2017-06-20 10:57:49 +01004770 {"i915_reset_info", i915_reset_info, 0},
Jesse Barnesf97108d2010-01-29 11:27:07 -08004771 {"i915_drpc_info", i915_drpc_info, 0},
Jesse Barnes7648fa92010-05-20 14:28:11 -07004772 {"i915_emon_status", i915_emon_status, 0},
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07004773 {"i915_ring_freq_table", i915_ring_freq_table, 0},
Daniel Vetter9a851782015-06-18 10:30:22 +02004774 {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
Jesse Barnesb5e50c32010-02-05 12:42:41 -08004775 {"i915_fbc_status", i915_fbc_status, 0},
Paulo Zanoni92d44622013-05-31 16:33:24 -03004776 {"i915_ips_status", i915_ips_status, 0},
Jesse Barnes4a9bef32010-02-05 12:47:35 -08004777 {"i915_sr_status", i915_sr_status, 0},
Chris Wilson44834a62010-08-19 16:09:23 +01004778 {"i915_opregion", i915_opregion, 0},
Jani Nikulaada8f952015-12-15 13:17:12 +02004779 {"i915_vbt", i915_vbt, 0},
Chris Wilson37811fc2010-08-25 22:45:57 +01004780 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
Ben Widawskye76d3632011-03-19 18:14:29 -07004781 {"i915_context_status", i915_context_status, 0},
Mika Kuoppalaf65367b2015-01-16 11:34:42 +02004782 {"i915_forcewake_domains", i915_forcewake_domains, 0},
Daniel Vetterea16a3c2011-12-14 13:57:16 +01004783 {"i915_swizzle_info", i915_swizzle_info, 0},
Daniel Vetter3cf17fc2012-02-09 17:15:49 +01004784 {"i915_ppgtt_info", i915_ppgtt_info, 0},
Ben Widawsky63573eb2013-07-04 11:02:07 -07004785 {"i915_llc", i915_llc, 0},
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03004786 {"i915_edp_psr_status", i915_edp_psr_status, 0},
Rodrigo Vivid2e216d2014-01-24 13:36:17 -02004787 {"i915_sink_crc_eDP1", i915_sink_crc, 0},
Jesse Barnesec013e72013-08-20 10:29:23 +01004788 {"i915_energy_uJ", i915_energy_uJ, 0},
Damien Lespiau6455c872015-06-04 18:23:57 +01004789 {"i915_runtime_pm_status", i915_runtime_pm_status, 0},
Imre Deak1da51582013-11-25 17:15:35 +02004790 {"i915_power_domain_info", i915_power_domain_info, 0},
Damien Lespiaub7cec662015-10-27 14:47:01 +02004791 {"i915_dmc_info", i915_dmc_info, 0},
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08004792 {"i915_display_info", i915_display_info, 0},
Chris Wilson1b365952016-10-04 21:11:31 +01004793 {"i915_engine_info", i915_engine_info, 0},
Lionel Landwerlin79e9cd52018-03-06 12:28:54 +00004794 {"i915_rcs_topology", i915_rcs_topology, 0},
Chris Wilsonc5418a82017-10-13 21:26:19 +01004795 {"i915_shrinker_info", i915_shrinker_info, 0},
Daniel Vetter728e29d2014-06-25 22:01:53 +03004796 {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
Dave Airlie11bed952014-05-12 15:22:27 +10004797 {"i915_dp_mst_info", i915_dp_mst_info, 0},
Damien Lespiau1ed1ef92014-08-30 16:50:59 +01004798 {"i915_wa_registers", i915_wa_registers, 0},
Damien Lespiauc5511e42014-11-04 17:06:51 +00004799 {"i915_ddb_info", i915_ddb_info, 0},
Jeff McGee38732182015-02-13 10:27:54 -06004800 {"i915_sseu_status", i915_sseu_status, 0},
Vandana Kannana54746e2015-03-03 20:53:10 +05304801 {"i915_drrs_status", i915_drrs_status, 0},
Chris Wilson1854d5c2015-04-07 16:20:32 +01004802 {"i915_rps_boost_info", i915_rps_boost_info, 0},
Ben Gamari20172632009-02-17 20:08:50 -05004803};
Ben Gamari27c202a2009-07-01 22:26:52 -04004804#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
Ben Gamari20172632009-02-17 20:08:50 -05004805
Lespiau, Damien06c5bf82013-10-17 19:09:56 +01004806static const struct i915_debugfs_files {
Daniel Vetter34b96742013-07-04 20:49:44 +02004807 const char *name;
4808 const struct file_operations *fops;
4809} i915_debugfs_files[] = {
4810 {"i915_wedged", &i915_wedged_fops},
Daniel Vetter34b96742013-07-04 20:49:44 +02004811 {"i915_cache_sharing", &i915_cache_sharing_fops},
Chris Wilson094f9a52013-09-25 17:34:55 +01004812 {"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
4813 {"i915_ring_test_irq", &i915_ring_test_irq_fops},
Daniel Vetter34b96742013-07-04 20:49:44 +02004814 {"i915_gem_drop_caches", &i915_drop_caches_fops},
Chris Wilson98a2f412016-10-12 10:05:18 +01004815#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
Daniel Vetter34b96742013-07-04 20:49:44 +02004816 {"i915_error_state", &i915_error_state_fops},
Chris Wilson5a4c6f12017-02-14 16:46:11 +00004817 {"i915_gpu_info", &i915_gpu_info_fops},
Chris Wilson98a2f412016-10-12 10:05:18 +01004818#endif
Maarten Lankhorstd52ad9c2018-03-28 12:05:26 +02004819 {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
Daniel Vetter34b96742013-07-04 20:49:44 +02004820 {"i915_next_seqno", &i915_next_seqno_fops},
Ville Syrjälä369a1342014-01-22 14:36:08 +02004821 {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
4822 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
4823 {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
Ville Syrjälä4127dc42017-06-06 15:44:12 +03004824 {"i915_fbc_false_color", &i915_fbc_false_color_fops},
Todd Previteeb3394fa2015-04-18 00:04:19 -07004825 {"i915_dp_test_data", &i915_displayport_test_data_fops},
4826 {"i915_dp_test_type", &i915_displayport_test_type_fops},
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05304827 {"i915_dp_test_active", &i915_displayport_test_active_fops},
Michał Winiarski4977a282018-03-19 10:53:40 +01004828 {"i915_guc_log_level", &i915_guc_log_level_fops},
4829 {"i915_guc_log_relay", &i915_guc_log_relay_fops},
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05304830 {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
C, Ramalingam35954e82017-11-08 00:08:23 +05304831 {"i915_ipc_status", &i915_ipc_status_fops},
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07004832 {"i915_drrs_ctl", &i915_drrs_ctl_fops},
4833 {"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
Daniel Vetter34b96742013-07-04 20:49:44 +02004834};
4835
Chris Wilson1dac8912016-06-24 14:00:17 +01004836int i915_debugfs_register(struct drm_i915_private *dev_priv)
Ben Gamari20172632009-02-17 20:08:50 -05004837{
Chris Wilson91c8a322016-07-05 10:40:23 +01004838 struct drm_minor *minor = dev_priv->drm.primary;
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004839 struct dentry *ent;
Maarten Lankhorst6cc42152018-06-28 09:23:02 +02004840 int i;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004841
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004842 ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4843 minor->debugfs_root, to_i915(minor->dev),
4844 &i915_forcewake_fops);
4845 if (!ent)
4846 return -ENOMEM;
Daniel Vetter6a9c3082011-12-14 13:57:11 +01004847
Daniel Vetter34b96742013-07-04 20:49:44 +02004848 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004849 ent = debugfs_create_file(i915_debugfs_files[i].name,
4850 S_IRUGO | S_IWUSR,
4851 minor->debugfs_root,
4852 to_i915(minor->dev),
Daniel Vetter34b96742013-07-04 20:49:44 +02004853 i915_debugfs_files[i].fops);
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004854 if (!ent)
4855 return -ENOMEM;
Daniel Vetter34b96742013-07-04 20:49:44 +02004856 }
Mika Kuoppala40633212012-12-04 15:12:00 +02004857
Ben Gamari27c202a2009-07-01 22:26:52 -04004858 return drm_debugfs_create_files(i915_debugfs_list,
4859 I915_DEBUGFS_ENTRIES,
Ben Gamari20172632009-02-17 20:08:50 -05004860 minor->debugfs_root, minor);
4861}
4862
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004863struct dpcd_block {
4864 /* DPCD dump start address. */
4865 unsigned int offset;
4866 /* DPCD dump end address, inclusive. If unset, .size will be used. */
4867 unsigned int end;
4868 /* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
4869 size_t size;
4870 /* Only valid for eDP. */
4871 bool edp;
4872};
4873
/* DPCD ranges dumped by i915_dpcd_show(); see struct dpcd_block above. */
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
4886
4887static int i915_dpcd_show(struct seq_file *m, void *data)
4888{
4889 struct drm_connector *connector = m->private;
4890 struct intel_dp *intel_dp =
4891 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4892 uint8_t buf[16];
4893 ssize_t err;
4894 int i;
4895
Mika Kuoppala5c1a8872015-05-15 13:09:21 +03004896 if (connector->status != connector_status_connected)
4897 return -ENODEV;
4898
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004899 for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4900 const struct dpcd_block *b = &i915_dpcd_debug[i];
4901 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4902
4903 if (b->edp &&
4904 connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4905 continue;
4906
4907 /* low tech for now */
4908 if (WARN_ON(size > sizeof(buf)))
4909 continue;
4910
4911 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
4912 if (err <= 0) {
4913 DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
4914 size, b->offset, err);
4915 continue;
4916 }
4917
4918 seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
kbuild test robotb3f9d7d2015-04-16 18:34:06 +08004919 }
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004920
4921 return 0;
4922}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02004923DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004924
David Weinehallecbd6782016-08-23 12:23:56 +03004925static int i915_panel_show(struct seq_file *m, void *data)
4926{
4927 struct drm_connector *connector = m->private;
4928 struct intel_dp *intel_dp =
4929 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4930
4931 if (connector->status != connector_status_connected)
4932 return -ENODEV;
4933
4934 seq_printf(m, "Panel power up delay: %d\n",
4935 intel_dp->panel_power_up_delay);
4936 seq_printf(m, "Panel power down delay: %d\n",
4937 intel_dp->panel_power_down_delay);
4938 seq_printf(m, "Backlight on delay: %d\n",
4939 intel_dp->backlight_on_delay);
4940 seq_printf(m, "Backlight off delay: %d\n",
4941 intel_dp->backlight_off_delay);
4942
4943 return 0;
4944}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02004945DEFINE_SHOW_ATTRIBUTE(i915_panel);
David Weinehallecbd6782016-08-23 12:23:56 +03004946
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004947/**
4948 * i915_debugfs_connector_add - add i915 specific connector debugfs files
4949 * @connector: pointer to a registered drm_connector
4950 *
4951 * Cleanup will be done by drm_connector_unregister() through a call to
4952 * drm_debugfs_connector_remove().
4953 *
4954 * Returns 0 on success, negative error codes on error.
4955 */
4956int i915_debugfs_connector_add(struct drm_connector *connector)
4957{
4958 struct dentry *root = connector->debugfs_entry;
4959
4960 /* The connector must have been registered beforehands. */
4961 if (!root)
4962 return -ENODEV;
4963
4964 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4965 connector->connector_type == DRM_MODE_CONNECTOR_eDP)
David Weinehallecbd6782016-08-23 12:23:56 +03004966 debugfs_create_file("i915_dpcd", S_IRUGO, root,
4967 connector, &i915_dpcd_fops);
4968
4969 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4970 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4971 connector, &i915_panel_fops);
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004972
4973 return 0;
4974}