blob: 1a4d9d996fda0eabbbf89e4f90e36b85a915ef1d [file] [log] [blame]
Ben Gamari20172632009-02-17 20:08:50 -05001/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 *
27 */
28
Chris Wilsonf3cd4742009-10-13 22:20:20 +010029#include <linux/debugfs.h>
Chris Wilsone637d2c2017-03-16 13:19:57 +000030#include <linux/sort.h>
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +010031#include <linux/sched/mm.h>
Simon Farnsworth4e5359c2010-09-01 17:47:52 +010032#include "intel_drv.h"
Sagar Arun Kamblea2695742017-11-16 19:02:41 +053033#include "intel_guc_submission.h"
Ben Gamari20172632009-02-17 20:08:50 -050034
/* Resolve the debugfs node back to its owning i915 device instance. */
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}
39
/*
 * debugfs i915_capabilities: dump the static device capabilities, the
 * runtime-probed device info, driver capabilities, and the current module
 * parameters.  Read-only; @data is unused.
 */
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

	intel_device_info_dump_flags(info, &p);
	intel_device_info_dump_runtime(info, &p);
	intel_driver_caps_print(&dev_priv->caps, &p);

	/* Hold the param lock so the values cannot change mid-dump. */
	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915_modparams, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}
Ben Gamari433e12f2009-02-17 20:08:51 -050060
/* One-character flag for describe_obj(): '*' while the object is active. */
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_is_active(obj))
		return '*';

	return ' ';
}
65
Imre Deaka7363de2016-05-12 16:18:52 +030066static char get_pin_flag(struct drm_i915_gem_object *obj)
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010067{
Chris Wilsonbd3d2252017-10-13 21:26:14 +010068 return obj->pin_global ? 'p' : ' ';
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010069}
70
Imre Deaka7363de2016-05-12 16:18:52 +030071static char get_tiling_flag(struct drm_i915_gem_object *obj)
Chris Wilsona6172a82009-02-11 14:26:38 +000072{
Chris Wilson3e510a82016-08-05 10:14:23 +010073 switch (i915_gem_object_get_tiling(obj)) {
Akshay Joshi0206e352011-08-16 15:34:10 -040074 default:
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010075 case I915_TILING_NONE: return ' ';
76 case I915_TILING_X: return 'X';
77 case I915_TILING_Y: return 'Y';
Akshay Joshi0206e352011-08-16 15:34:10 -040078 }
Chris Wilsona6172a82009-02-11 14:26:38 +000079}
80
Imre Deaka7363de2016-05-12 16:18:52 +030081static char get_global_flag(struct drm_i915_gem_object *obj)
Ben Widawsky1d693bc2013-07-31 17:00:00 -070082{
Chris Wilsona65adaf2017-10-09 09:43:57 +010083 return obj->userfault_count ? 'g' : ' ';
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010084}
85
Imre Deaka7363de2016-05-12 16:18:52 +030086static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010087{
Chris Wilsona4f5ea62016-10-28 13:58:35 +010088 return obj->mm.mapping ? 'M' : ' ';
Ben Widawsky1d693bc2013-07-31 17:00:00 -070089}
90
Tvrtko Ursulinca1543b2015-07-01 11:51:10 +010091static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
92{
93 u64 size = 0;
94 struct i915_vma *vma;
95
Chris Wilsone2189dd2017-12-07 21:14:07 +000096 for_each_ggtt_vma(vma, obj) {
97 if (drm_mm_node_allocated(&vma->node))
Tvrtko Ursulinca1543b2015-07-01 11:51:10 +010098 size += vma->node.size;
99 }
100
101 return size;
102}
103
Matthew Auld7393b7e2017-10-06 23:18:28 +0100104static const char *
105stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
106{
107 size_t x = 0;
108
109 switch (page_sizes) {
110 case 0:
111 return "";
112 case I915_GTT_PAGE_SIZE_4K:
113 return "4K";
114 case I915_GTT_PAGE_SIZE_64K:
115 return "64K";
116 case I915_GTT_PAGE_SIZE_2M:
117 return "2M";
118 default:
119 if (!buf)
120 return "M";
121
122 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
123 x += snprintf(buf + x, len - x, "2M, ");
124 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
125 x += snprintf(buf + x, len - x, "64K, ");
126 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
127 x += snprintf(buf + x, len - x, "4K, ");
128 buf[x-2] = '\0';
129
130 return buf;
131 }
132}
133
/*
 * Print a single-line description of a GEM object: flags, size, cache
 * domains, every allocated VMA (with GGTT view details), fence, stolen
 * offset, last-write engine and frontbuffer bits.
 *
 * Caller must hold struct_mutex: we walk obj->vma_list and dereference
 * vma state without further locking.
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* %pK hashes the pointer for unprivileged readers. */
	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	/* First pass: just count pinned VMAs so the summary precedes details. */
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_global)
		seq_printf(m, " (global)");
	/* Second pass: describe each VMA that actually occupies address space. */
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				/* partial offsets/sizes are stored in pages. */
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
221
Chris Wilsone637d2c2017-03-16 13:19:57 +0000222static int obj_rank_by_stolen(const void *A, const void *B)
Chris Wilson6d2b88852013-08-07 18:30:54 +0100223{
Chris Wilsone637d2c2017-03-16 13:19:57 +0000224 const struct drm_i915_gem_object *a =
225 *(const struct drm_i915_gem_object **)A;
226 const struct drm_i915_gem_object *b =
227 *(const struct drm_i915_gem_object **)B;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100228
Rasmus Villemoes2d05fa12015-09-28 23:08:50 +0200229 if (a->stolen->start < b->stolen->start)
230 return -1;
231 if (a->stolen->start > b->stolen->start)
232 return 1;
233 return 0;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100234}
235
/*
 * debugfs i915_gem_stolen: list every GEM object backed by stolen memory,
 * sorted by stolen offset, followed by totals.
 *
 * Locking: we snapshot the object lists under mm.obj_lock (a spinlock, so
 * we cannot call describe_obj there) into a pre-sized array, then print
 * from the snapshot while still holding struct_mutex.
 */
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	/* Unlocked snapshot of the object count; used only to size the array. */
	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		/* The lists may have grown since the unlocked count was taken. */
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);

	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		/* Unbound objects occupy no GTT space, so only size is added. */
		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, "   ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}
298
/*
 * Aggregated GEM object statistics for one client (or the kernel itself),
 * filled in by per_file_stats()/per_file_ctx_stats().  All sizes in bytes.
 */
struct file_stats {
	struct drm_i915_file_private *file_priv; /* owner used to filter ppGTT VMAs */
	unsigned long count;	/* number of objects visited */
	u64 total, unbound;	/* cumulative size; size with no bindings at all */
	u64 global, shared;	/* GGTT-bound size; flink-named or dma-buf exported */
	u64 active, inactive;	/* bound VMA size split by GPU activity */
};
306
/*
 * idr_for_each() callback: accumulate one object's sizes into *data (a
 * struct file_stats).  ppGTT VMAs belonging to a different client than
 * stats->file_priv are skipped, so shared objects are only attributed to
 * their owner's per-process address space.  Always returns 0 (continue).
 */
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);

			/* Only count ppGTT bindings owned by this client. */
			if (ppgtt->vm.file != stats->file_priv)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;
	}

	return 0;
}
343
/*
 * Emit a one-line summary of @stats under the label @name, skipping output
 * entirely when no objects were counted.  Note: a macro taking @stats by
 * value, and @stats is expanded several times — pass a plain lvalue, never
 * an expression with side effects.
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)
Brad Volkin493018d2014-12-11 12:13:08 -0800356
/*
 * Summarise all objects held in the per-engine batch-buffer pools under the
 * "[k]batch pool" label.  Reuses per_file_stats() with a dummy id/owner so
 * the counting logic stays in one place.
 */
static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct file_stats stats;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int j;

	memset(&stats, 0, sizeof(stats));

	for_each_engine(engine, dev_priv, id) {
		/* Each engine keeps several size-binned cache lists. */
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}
379
/*
 * idr_for_each() callback: fold one GEM context's per-engine state and ring
 * objects into the running file_stats in *data.  Always returns 0 so the
 * iteration continues over every context.
 */
static int per_file_ctx_stats(int idx, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, ctx->i915, id) {
		struct intel_context *ce = to_intel_context(ctx, engine);

		/* Context image and ringbuffer are lazily allocated; may be NULL. */
		if (ce->state)
			per_file_stats(0, ce->state->obj, data);
		if (ce->ring)
			per_file_stats(0, ce->ring->vma->obj, data);
	}

	return 0;
}
397
/*
 * Summarise the memory consumed by all GEM contexts — the kernel context
 * plus every context of every open client — under the "[k]contexts" label.
 * Takes struct_mutex around the filelist/context walk.
 */
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct file_stats stats;
	struct drm_file *file;

	memset(&stats, 0, sizeof(stats));

	mutex_lock(&dev->struct_mutex);
	if (dev_priv->kernel_context)
		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *fpriv = file->driver_priv;
		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
	}
	mutex_unlock(&dev->struct_mutex);

	print_file_stats(m, "[k]contexts", stats);
}
419
/*
 * debugfs i915_gem_objects: global GEM memory accounting — unbound/bound
 * totals, purgeable/mapped/huge-page breakdowns, GGTT capacity, batch-pool
 * and context stats, then a per-client summary line for each open file.
 *
 * Locking: struct_mutex for the object walks and per-client stats;
 * mm.obj_lock (spinlock) additionally guards the bound/unbound list
 * iteration; filelist_mutex guards the client list, with struct_mutex
 * re-taken inside the loop for each client.
 */
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
	struct drm_i915_gem_object *obj;
	unsigned int page_sizes = 0;
	struct drm_file *file;
	char buf[80];
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	huge_size = huge_count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	/* Pass 1: objects with no GTT binding. */
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		/* Backing store using pages larger than the minimum 4K. */
		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	/* Pass 2: bound objects; purgeable/mapped/huge counters keep accruing. */
	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->pin_global) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
		   huge_count,
		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
		   huge_size);
	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%pa] gtt total\n",
		   ggtt->vm.total, &ggtt->mappable_end);
	seq_printf(m, "Supported page sizes: %s\n",
		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
					buf, sizeof(buf)));

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	mutex_lock(&dev->filelist_mutex);
	print_context_stats(m, dev_priv);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct i915_request *request;
		struct task_struct *task;

		mutex_lock(&dev->struct_mutex);

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		request = list_first_entry_or_null(&file_priv->mm.request_list,
						   struct i915_request,
						   client_link);
		rcu_read_lock();
		/* Prefer the pid recorded on the client's last request, if any. */
		task = pid_task(request && request->gem_context->pid ?
				request->gem_context->pid : file->pid,
				PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();

		mutex_unlock(&dev->struct_mutex);
	}
	mutex_unlock(&dev->filelist_mutex);

	return 0;
}
557
Damien Lespiauaee56cf2013-06-24 22:59:49 +0100558static int i915_gem_gtt_info(struct seq_file *m, void *data)
Chris Wilson08c18322011-01-10 00:00:24 +0000559{
Damien Lespiau9f25d002014-05-13 15:30:28 +0100560 struct drm_info_node *node = m->private;
David Weinehall36cdd012016-08-22 13:59:31 +0300561 struct drm_i915_private *dev_priv = node_to_i915(node);
562 struct drm_device *dev = &dev_priv->drm;
Chris Wilsonf2123812017-10-16 12:40:37 +0100563 struct drm_i915_gem_object **objects;
Chris Wilson08c18322011-01-10 00:00:24 +0000564 struct drm_i915_gem_object *obj;
Mika Kuoppalac44ef602015-06-25 18:35:05 +0300565 u64 total_obj_size, total_gtt_size;
Chris Wilsonf2123812017-10-16 12:40:37 +0100566 unsigned long nobject, n;
Chris Wilson08c18322011-01-10 00:00:24 +0000567 int count, ret;
568
Chris Wilsonf2123812017-10-16 12:40:37 +0100569 nobject = READ_ONCE(dev_priv->mm.object_count);
570 objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
571 if (!objects)
572 return -ENOMEM;
573
Chris Wilson08c18322011-01-10 00:00:24 +0000574 ret = mutex_lock_interruptible(&dev->struct_mutex);
575 if (ret)
576 return ret;
577
Chris Wilsonf2123812017-10-16 12:40:37 +0100578 count = 0;
579 spin_lock(&dev_priv->mm.obj_lock);
580 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
581 objects[count++] = obj;
582 if (count == nobject)
583 break;
584 }
585 spin_unlock(&dev_priv->mm.obj_lock);
586
587 total_obj_size = total_gtt_size = 0;
588 for (n = 0; n < count; n++) {
589 obj = objects[n];
590
Damien Lespiau267f0c92013-06-24 22:59:48 +0100591 seq_puts(m, " ");
Chris Wilson08c18322011-01-10 00:00:24 +0000592 describe_obj(m, obj);
Damien Lespiau267f0c92013-06-24 22:59:48 +0100593 seq_putc(m, '\n');
Chris Wilson08c18322011-01-10 00:00:24 +0000594 total_obj_size += obj->base.size;
Tvrtko Ursulinca1543b2015-07-01 11:51:10 +0100595 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
Chris Wilson08c18322011-01-10 00:00:24 +0000596 }
597
598 mutex_unlock(&dev->struct_mutex);
599
Mika Kuoppalac44ef602015-06-25 18:35:05 +0300600 seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
Chris Wilson08c18322011-01-10 00:00:24 +0000601 count, total_obj_size, total_gtt_size);
Chris Wilsonf2123812017-10-16 12:40:37 +0100602 kvfree(objects);
Chris Wilson08c18322011-01-10 00:00:24 +0000603
604 return 0;
605}
606
/*
 * debugfs i915_gem_batch_pool: for every engine and every size-binned cache
 * list in its batch pool, print the object count followed by a description
 * of each object, then a grand total.  Holds struct_mutex throughout.
 */
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			/* First walk: count, so the header precedes the objects. */
			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			/* Second walk: describe each object. */
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, "   ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
651
/*
 * Print the GEN8+ display-engine interrupt registers to the seq_file:
 * per-pipe IMR/IIR/IER, then the port, misc and PCU interrupt banks.
 * Shared by the gen8 and gen11 branches of i915_interrupt_info().
 */
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		/*
		 * Only read the pipe registers if their power well is up;
		 * otherwise just report the pipe as powered down.
		 */
		if (!intel_display_power_get_if_enabled(dev_priv,
							power_domain)) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		/* Balance the conditional power reference taken above. */
		intel_display_power_put(dev_priv, power_domain);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}
701
/*
 * debugfs "i915_interrupt_info": dump the raw interrupt mask (IMR),
 * identity (IIR) and enable (IER) registers, selecting the register
 * layout by platform: CHV, gen11+, gen8+, VLV, pre-PCH-split (GMCH),
 * then PCH-split (Ironlake-style north/south display).  Holds a
 * runtime-PM wakeref for the duration so the registers are readable.
 */
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, pipe;

	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			/* Skip pipes whose power well is down. */
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain);
		}

		/* Hotplug/flip registers need the init power domain on CHV. */
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "Master Interrupt Control: %08x\n",
			   I915_READ(GEN11_GFX_MSTR_IRQ));

		seq_printf(m, "Render/Copy Intr Enable: %08x\n",
			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
		seq_printf(m, "VCS/VECS Intr Enable: %08x\n",
			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
		seq_printf(m, "GUC/SG Intr Enable:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
		seq_printf(m, "Crypto Intr Enable:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
		seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));

		seq_printf(m, "Display Interrupt Control:\t%08x\n",
			   I915_READ(GEN11_DISPLAY_INT_CTL));

		gen8_display_interrupt_info(m);
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		gen8_display_interrupt_info(m);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			/* Skip pipes whose power well is down. */
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		/* GMCH-era single interrupt register block. */
		seq_printf(m, "Interrupt enable: %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity: %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask: %08x\n",
			   I915_READ(IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat: %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		/* Ironlake-style split: north (CPU) and south (PCH) display. */
		seq_printf(m, "North Display Interrupt enable: %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask: %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable: %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask: %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable: %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity: %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask: %08x\n",
			   I915_READ(GTIMR));
	}

	/* Per-engine interrupt masks, printed for all of the above layouts. */
	if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "RCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
		seq_printf(m, "BCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_MASK));
		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));

	} else if (INTEL_GEN(dev_priv) >= 6) {
		for_each_engine(engine, dev_priv, id) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s): %08x\n",
				   engine->name, I915_READ_IMR(engine));
		}
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}
914
Chris Wilsona6172a82009-02-11 14:26:38 +0000915static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
916{
David Weinehall36cdd012016-08-22 13:59:31 +0300917 struct drm_i915_private *dev_priv = node_to_i915(m->private);
918 struct drm_device *dev = &dev_priv->drm;
Chris Wilsonde227ef2010-07-03 07:58:38 +0100919 int i, ret;
920
921 ret = mutex_lock_interruptible(&dev->struct_mutex);
922 if (ret)
923 return ret;
Chris Wilsona6172a82009-02-11 14:26:38 +0000924
Chris Wilsona6172a82009-02-11 14:26:38 +0000925 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
926 for (i = 0; i < dev_priv->num_fence_regs; i++) {
Chris Wilson49ef5292016-08-18 17:17:00 +0100927 struct i915_vma *vma = dev_priv->fence_regs[i].vma;
Chris Wilsona6172a82009-02-11 14:26:38 +0000928
Chris Wilson6c085a72012-08-20 11:40:46 +0200929 seq_printf(m, "Fence %d, pin count = %d, object = ",
930 i, dev_priv->fence_regs[i].pin_count);
Chris Wilson49ef5292016-08-18 17:17:00 +0100931 if (!vma)
Damien Lespiau267f0c92013-06-24 22:59:48 +0100932 seq_puts(m, "unused");
Chris Wilsonc2c347a92010-10-27 15:11:53 +0100933 else
Chris Wilson49ef5292016-08-18 17:17:00 +0100934 describe_obj(m, vma->obj);
Damien Lespiau267f0c92013-06-24 22:59:48 +0100935 seq_putc(m, '\n');
Chris Wilsona6172a82009-02-11 14:26:38 +0000936 }
937
Chris Wilson05394f32010-11-08 19:18:58 +0000938 mutex_unlock(&dev->struct_mutex);
Chris Wilsona6172a82009-02-11 14:26:38 +0000939 return 0;
940}
941
Chris Wilson98a2f412016-10-12 10:05:18 +0100942#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000943static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
944 size_t count, loff_t *pos)
945{
946 struct i915_gpu_state *error = file->private_data;
947 struct drm_i915_error_state_buf str;
948 ssize_t ret;
949 loff_t tmp;
950
951 if (!error)
952 return 0;
953
954 ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
955 if (ret)
956 return ret;
957
958 ret = i915_error_state_to_str(&str, error);
959 if (ret)
960 goto out;
961
962 tmp = 0;
963 ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
964 if (ret < 0)
965 goto out;
966
967 *pos = str.start + ret;
968out:
969 i915_error_state_buf_release(&str);
970 return ret;
971}
972
973static int gpu_state_release(struct inode *inode, struct file *file)
974{
975 i915_gpu_state_put(file->private_data);
976 return 0;
977}
978
979static int i915_gpu_info_open(struct inode *inode, struct file *file)
980{
Chris Wilson090e5fe2017-03-28 14:14:07 +0100981 struct drm_i915_private *i915 = inode->i_private;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000982 struct i915_gpu_state *gpu;
983
Chris Wilson090e5fe2017-03-28 14:14:07 +0100984 intel_runtime_pm_get(i915);
985 gpu = i915_capture_gpu_state(i915);
986 intel_runtime_pm_put(i915);
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000987 if (!gpu)
988 return -ENOMEM;
989
990 file->private_data = gpu;
991 return 0;
992}
993
/* debugfs "i915_gpu_info": captures a GPU state snapshot on open. */
static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
Chris Wilson98a2f412016-10-12 10:05:18 +01001001
Daniel Vetterd5442302012-04-27 15:17:40 +02001002static ssize_t
1003i915_error_state_write(struct file *filp,
1004 const char __user *ubuf,
1005 size_t cnt,
1006 loff_t *ppos)
1007{
Chris Wilson5a4c6f12017-02-14 16:46:11 +00001008 struct i915_gpu_state *error = filp->private_data;
1009
1010 if (!error)
1011 return 0;
Daniel Vetterd5442302012-04-27 15:17:40 +02001012
1013 DRM_DEBUG_DRIVER("Resetting error state\n");
Chris Wilson5a4c6f12017-02-14 16:46:11 +00001014 i915_reset_error_state(error->i915);
Daniel Vetterd5442302012-04-27 15:17:40 +02001015
1016 return cnt;
1017}
1018
1019static int i915_error_state_open(struct inode *inode, struct file *file)
1020{
Chris Wilson5a4c6f12017-02-14 16:46:11 +00001021 file->private_data = i915_first_error_state(inode->i_private);
Mika Kuoppalaedc3d882013-05-23 13:55:35 +03001022 return 0;
Daniel Vetterd5442302012-04-27 15:17:40 +02001023}
1024
/* debugfs "i915_error_state": read the last capture, write to clear it. */
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
Chris Wilson98a2f412016-10-12 10:05:18 +01001033#endif
1034
Kees Cook647416f2013-03-10 14:10:06 -07001035static int
Kees Cook647416f2013-03-10 14:10:06 -07001036i915_next_seqno_set(void *data, u64 val)
Mika Kuoppala40633212012-12-04 15:12:00 +02001037{
David Weinehall36cdd012016-08-22 13:59:31 +03001038 struct drm_i915_private *dev_priv = data;
1039 struct drm_device *dev = &dev_priv->drm;
Mika Kuoppala40633212012-12-04 15:12:00 +02001040 int ret;
1041
Mika Kuoppala40633212012-12-04 15:12:00 +02001042 ret = mutex_lock_interruptible(&dev->struct_mutex);
1043 if (ret)
1044 return ret;
1045
Chris Wilson65c475c2018-01-02 15:12:31 +00001046 intel_runtime_pm_get(dev_priv);
Chris Wilson73cb9702016-10-28 13:58:46 +01001047 ret = i915_gem_set_global_seqno(dev, val);
Chris Wilson65c475c2018-01-02 15:12:31 +00001048 intel_runtime_pm_put(dev_priv);
1049
Mika Kuoppala40633212012-12-04 15:12:00 +02001050 mutex_unlock(&dev->struct_mutex);
1051
Kees Cook647416f2013-03-10 14:10:06 -07001052 return ret;
Mika Kuoppala40633212012-12-04 15:12:00 +02001053}
1054
/* Write-only attribute (no getter): set the next global seqno, "0x%llx". */
DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			NULL, i915_next_seqno_set,
			"0x%llx\n");
Mika Kuoppala40633212012-12-04 15:12:00 +02001058
/*
 * debugfs "i915_frequency_info": dump the GPU frequency / RPS state.
 * Three hardware layouts are handled: ILK (gen5) MEMSWCTL/MEMSTAT,
 * VLV/CHV punit-based frequencies, and gen6+ RPS registers; anything
 * older prints a "no P-state info" stub.  Holds a runtime-PM wakeref
 * throughout; the gen6+ path additionally takes forcewake while reading
 * registers that live in the GT power well.
 */
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	if (IS_GEN5(dev_priv)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		/* punit accesses are serialised by pcu_lock. */
		mutex_lock(&dev_priv->pcu_lock);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
		mutex_unlock(&dev_priv->pcu_lock);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		/* Broxton-class LP parts keep these in BXT_* registers. */
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		/* The requested-frequency field moved between generations. */
		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9)
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		cagf = intel_gpu_freq(dev_priv,
				      intel_get_cagf(dev_priv, rpstat));

		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		if (INTEL_GEN(dev_priv) >= 11) {
			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
			/*
			 * The equivalent to the PM ISR & IIR cannot be read
			 * without affecting the current state of the system
			 */
			pm_isr = 0;
			pm_iir = 0;
		} else if (INTEL_GEN(dev_priv) >= 8) {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
		} else {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
		}
		pm_mask = I915_READ(GEN6_PMINTRMSK);

		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_mask);
		/* ISR/IIR were not sampled on gen11+ (see above). */
		if (INTEL_GEN(dev_priv) <= 10)
			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
				   pm_isr, pm_iir);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   rps->power.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   rps->power.down_threshold);

		/*
		 * RP0/RP1/RPN field placement differs on GEN9_LP, and the
		 * values are in units of GEN9_FREQ_SCALER on gen9_bc/gen10+.
		 */
		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(dev_priv);
	return ret;
}
1280
Ben Widawskyd6369512016-09-20 16:54:32 +03001281static void i915_instdone_info(struct drm_i915_private *dev_priv,
1282 struct seq_file *m,
1283 struct intel_instdone *instdone)
1284{
Ben Widawskyf9e61372016-09-20 16:54:33 +03001285 int slice;
1286 int subslice;
1287
Ben Widawskyd6369512016-09-20 16:54:32 +03001288 seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1289 instdone->instdone);
1290
1291 if (INTEL_GEN(dev_priv) <= 3)
1292 return;
1293
1294 seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1295 instdone->slice_common);
1296
1297 if (INTEL_GEN(dev_priv) <= 6)
1298 return;
1299
Ben Widawskyf9e61372016-09-20 16:54:33 +03001300 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1301 seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1302 slice, subslice, instdone->sampler[slice][subslice]);
1303
1304 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1305 seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1306 slice, subslice, instdone->row[slice][subslice]);
Ben Widawskyd6369512016-09-20 16:54:32 +03001307}
1308
/*
 * debugfs entry: report the state of the GPU hangcheck machinery —
 * pending resets, per-engine seqnos/ACTHD, breadcrumb waiters, and the
 * last INSTDONE snapshot accumulated by hangcheck for the render engine.
 */
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	enum intel_engine_id id;

	/* Summarise any wedge/reset currently in flight first. */
	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "Wedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: struct_mutex backoff\n");
	if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: reset handoff to waiter\n");
	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
		seq_puts(m, "Waiter holding struct mutex\n");
	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
		seq_puts(m, "struct_mutex blocked for reset\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	/* Sample the hardware registers under a runtime-pm wakeref. */
	intel_runtime_pm_get(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		acthd[id] = intel_engine_get_active_head(engine);
		seqno[id] = intel_engine_get_seqno(engine);
	}

	/* INSTDONE is only read from the render engine (RCS). */
	intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);

	intel_runtime_pm_put(dev_priv);

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		struct intel_breadcrumbs *b = &engine->breadcrumbs;
		struct rb_node *rb;

		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
			   engine->hangcheck.seqno, seqno[id],
			   intel_engine_last_submit(engine));
		seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s, wedged? %s\n",
			   yesno(intel_engine_has_waiter(engine)),
			   yesno(test_bit(engine->id,
					  &dev_priv->gpu_error.missed_irq_rings)),
			   yesno(engine->hangcheck.stalled),
			   yesno(engine->hangcheck.wedged));

		/* Walk the breadcrumb tree of tasks waiting on this engine. */
		spin_lock_irq(&b->rb_lock);
		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
			struct intel_wait *w = rb_entry(rb, typeof(*w), node);

			seq_printf(m, "\t%s [%d] waiting for %x\n",
				   w->tsk->comm, w->tsk->pid, w->seqno);
		}
		spin_unlock_irq(&b->rb_lock);

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);
		seq_printf(m, "\taction = %s(%d) %d ms ago\n",
			   hangcheck_action_to_str(engine->hangcheck.action),
			   engine->hangcheck.action,
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		/* Both the fresh read and hangcheck's accumulated snapshot. */
		if (engine->id == RCS) {
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}
1403
Michel Thierry061d06a2017-06-20 10:57:49 +01001404static int i915_reset_info(struct seq_file *m, void *unused)
1405{
1406 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1407 struct i915_gpu_error *error = &dev_priv->gpu_error;
1408 struct intel_engine_cs *engine;
1409 enum intel_engine_id id;
1410
1411 seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1412
1413 for_each_engine(engine, dev_priv, id) {
1414 seq_printf(m, "%s = %u\n", engine->name,
1415 i915_reset_engine_count(error, engine));
1416 }
1417
1418 return 0;
1419}
1420
Ben Widawsky4d855292011-12-12 19:34:16 -08001421static int ironlake_drpc_info(struct seq_file *m)
Jesse Barnesf97108d2010-01-29 11:27:07 -08001422{
David Weinehall36cdd012016-08-22 13:59:31 +03001423 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Ben Widawsky616fdb52011-10-05 11:44:54 -07001424 u32 rgvmodectl, rstdbyctl;
1425 u16 crstandvid;
Ben Widawsky616fdb52011-10-05 11:44:54 -07001426
Ben Widawsky616fdb52011-10-05 11:44:54 -07001427 rgvmodectl = I915_READ(MEMMODECTL);
1428 rstdbyctl = I915_READ(RSTDBYCTL);
1429 crstandvid = I915_READ16(CRSTANDVID);
1430
Jani Nikula742f4912015-09-03 11:16:09 +03001431 seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001432 seq_printf(m, "Boost freq: %d\n",
1433 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1434 MEMMODE_BOOST_FREQ_SHIFT);
1435 seq_printf(m, "HW control enabled: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001436 yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001437 seq_printf(m, "SW control enabled: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001438 yesno(rgvmodectl & MEMMODE_SWMODE_EN));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001439 seq_printf(m, "Gated voltage change: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001440 yesno(rgvmodectl & MEMMODE_RCLK_GATE));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001441 seq_printf(m, "Starting frequency: P%d\n",
1442 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
Jesse Barnes7648fa92010-05-20 14:28:11 -07001443 seq_printf(m, "Max P-state: P%d\n",
Jesse Barnesf97108d2010-01-29 11:27:07 -08001444 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
Jesse Barnes7648fa92010-05-20 14:28:11 -07001445 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1446 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1447 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1448 seq_printf(m, "Render standby enabled: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001449 yesno(!(rstdbyctl & RCX_SW_EXIT)));
Damien Lespiau267f0c92013-06-24 22:59:48 +01001450 seq_puts(m, "Current RS state: ");
Jesse Barnes88271da2011-01-05 12:01:24 -08001451 switch (rstdbyctl & RSX_STATUS_MASK) {
1452 case RSX_STATUS_ON:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001453 seq_puts(m, "on\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001454 break;
1455 case RSX_STATUS_RC1:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001456 seq_puts(m, "RC1\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001457 break;
1458 case RSX_STATUS_RC1E:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001459 seq_puts(m, "RC1E\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001460 break;
1461 case RSX_STATUS_RS1:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001462 seq_puts(m, "RS1\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001463 break;
1464 case RSX_STATUS_RS2:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001465 seq_puts(m, "RS2 (RC6)\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001466 break;
1467 case RSX_STATUS_RS3:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001468 seq_puts(m, "RC3 (RC6+)\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001469 break;
1470 default:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001471 seq_puts(m, "unknown\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001472 break;
1473 }
Jesse Barnesf97108d2010-01-29 11:27:07 -08001474
1475 return 0;
1476}
1477
Mika Kuoppalaf65367b2015-01-16 11:34:42 +02001478static int i915_forcewake_domains(struct seq_file *m, void *data)
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001479{
Chris Wilson233ebf52017-03-23 10:19:44 +00001480 struct drm_i915_private *i915 = node_to_i915(m->private);
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001481 struct intel_uncore_forcewake_domain *fw_domain;
Chris Wilsond2dc94b2017-03-23 10:19:41 +00001482 unsigned int tmp;
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001483
Chris Wilsond7a133d2017-09-07 14:44:41 +01001484 seq_printf(m, "user.bypass_count = %u\n",
1485 i915->uncore.user_forcewake.count);
1486
Chris Wilson233ebf52017-03-23 10:19:44 +00001487 for_each_fw_domain(fw_domain, i915, tmp)
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001488 seq_printf(m, "%s.wake_count = %u\n",
Tvrtko Ursulin33c582c2016-04-07 17:04:33 +01001489 intel_uncore_forcewake_domain_to_str(fw_domain->id),
Chris Wilson233ebf52017-03-23 10:19:44 +00001490 READ_ONCE(fw_domain->wake_count));
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001491
1492 return 0;
1493}
1494
Mika Kuoppala13628772017-03-15 17:43:02 +02001495static void print_rc6_res(struct seq_file *m,
1496 const char *title,
1497 const i915_reg_t reg)
1498{
1499 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1500
1501 seq_printf(m, "%s %u (%llu us)\n",
1502 title, I915_READ(reg),
1503 intel_rc6_residency_us(dev_priv, reg));
1504}
1505
Deepak S669ab5a2014-01-10 15:18:26 +05301506static int vlv_drpc_info(struct seq_file *m)
1507{
David Weinehall36cdd012016-08-22 13:59:31 +03001508 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Sagar Arun Kamble0d6fc922017-10-10 22:30:02 +01001509 u32 rcctl1, pw_status;
Deepak S669ab5a2014-01-10 15:18:26 +05301510
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001511 pw_status = I915_READ(VLV_GTLC_PW_STATUS);
Deepak S669ab5a2014-01-10 15:18:26 +05301512 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1513
Deepak S669ab5a2014-01-10 15:18:26 +05301514 seq_printf(m, "RC6 Enabled: %s\n",
1515 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1516 GEN6_RC_CTL_EI_MODE(1))));
1517 seq_printf(m, "Render Power Well: %s\n",
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001518 (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
Deepak S669ab5a2014-01-10 15:18:26 +05301519 seq_printf(m, "Media Power Well: %s\n",
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001520 (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
Deepak S669ab5a2014-01-10 15:18:26 +05301521
Mika Kuoppala13628772017-03-15 17:43:02 +02001522 print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1523 print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
Imre Deak9cc19be2014-04-14 20:24:24 +03001524
Mika Kuoppalaf65367b2015-01-16 11:34:42 +02001525 return i915_forcewake_domains(m, NULL);
Deepak S669ab5a2014-01-10 15:18:26 +05301526}
1527
/*
 * Report gen6+ RC-state configuration: RC1e/RC6 enable bits, the current
 * RC state decoded from GT_CORE_STATUS, gen9 power-well gating, RC6
 * residency counters, and (gen6/7 only) RC6 voltages read via pcode.
 */
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	/* I915_READ_FW bypasses tracing, so log the access manually. */
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	/* RC6 VIDs are only exposed through pcode on gen6/7. */
	if (INTEL_GEN(dev_priv) <= 7) {
		mutex_lock(&dev_priv->pcu_lock);
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids);
		mutex_unlock(&dev_priv->pcu_lock);
	}

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		/* RC0 with cores power-gated is reported separately. */
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	if (INTEL_GEN(dev_priv) <= 7) {
		seq_printf(m, "RC6 voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	return i915_forcewake_domains(m, NULL);
}
1615
1616static int i915_drpc_info(struct seq_file *m, void *unused)
1617{
David Weinehall36cdd012016-08-22 13:59:31 +03001618 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001619 int err;
1620
1621 intel_runtime_pm_get(dev_priv);
Ben Widawsky4d855292011-12-12 19:34:16 -08001622
David Weinehall36cdd012016-08-22 13:59:31 +03001623 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001624 err = vlv_drpc_info(m);
David Weinehall36cdd012016-08-22 13:59:31 +03001625 else if (INTEL_GEN(dev_priv) >= 6)
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001626 err = gen6_drpc_info(m);
Ben Widawsky4d855292011-12-12 19:34:16 -08001627 else
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001628 err = ironlake_drpc_info(m);
1629
1630 intel_runtime_pm_put(dev_priv);
1631
1632 return err;
Ben Widawsky4d855292011-12-12 19:34:16 -08001633}
1634
Daniel Vetter9a851782015-06-18 10:30:22 +02001635static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1636{
David Weinehall36cdd012016-08-22 13:59:31 +03001637 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Daniel Vetter9a851782015-06-18 10:30:22 +02001638
1639 seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1640 dev_priv->fb_tracking.busy_bits);
1641
1642 seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1643 dev_priv->fb_tracking.flip_bits);
1644
1645 return 0;
1646}
1647
Jesse Barnesb5e50c32010-02-05 12:42:41 -08001648static int i915_fbc_status(struct seq_file *m, void *unused)
1649{
David Weinehall36cdd012016-08-22 13:59:31 +03001650 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilson31388722017-12-20 20:58:48 +00001651 struct intel_fbc *fbc = &dev_priv->fbc;
Jesse Barnesb5e50c32010-02-05 12:42:41 -08001652
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00001653 if (!HAS_FBC(dev_priv))
1654 return -ENODEV;
Jesse Barnesb5e50c32010-02-05 12:42:41 -08001655
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001656 intel_runtime_pm_get(dev_priv);
Chris Wilson31388722017-12-20 20:58:48 +00001657 mutex_lock(&fbc->lock);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001658
Paulo Zanoni0e631ad2015-10-14 17:45:36 -03001659 if (intel_fbc_is_active(dev_priv))
Damien Lespiau267f0c92013-06-24 22:59:48 +01001660 seq_puts(m, "FBC enabled\n");
Paulo Zanoni2e8144a2015-06-12 14:36:20 -03001661 else
Chris Wilson31388722017-12-20 20:58:48 +00001662 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1663
Ville Syrjälä3fd5d1e2017-06-06 15:43:18 +03001664 if (intel_fbc_is_active(dev_priv)) {
1665 u32 mask;
1666
1667 if (INTEL_GEN(dev_priv) >= 8)
1668 mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1669 else if (INTEL_GEN(dev_priv) >= 7)
1670 mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1671 else if (INTEL_GEN(dev_priv) >= 5)
1672 mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1673 else if (IS_G4X(dev_priv))
1674 mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1675 else
1676 mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1677 FBC_STAT_COMPRESSED);
1678
1679 seq_printf(m, "Compressing: %s\n", yesno(mask));
Paulo Zanoni0fc6a9d2016-10-21 13:55:46 -02001680 }
Paulo Zanoni31b9df12015-06-12 14:36:18 -03001681
Chris Wilson31388722017-12-20 20:58:48 +00001682 mutex_unlock(&fbc->lock);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001683 intel_runtime_pm_put(dev_priv);
1684
Jesse Barnesb5e50c32010-02-05 12:42:41 -08001685 return 0;
1686}
1687
Ville Syrjälä4127dc42017-06-06 15:44:12 +03001688static int i915_fbc_false_color_get(void *data, u64 *val)
Rodrigo Vivida46f932014-08-01 02:04:45 -07001689{
David Weinehall36cdd012016-08-22 13:59:31 +03001690 struct drm_i915_private *dev_priv = data;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001691
David Weinehall36cdd012016-08-22 13:59:31 +03001692 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
Rodrigo Vivida46f932014-08-01 02:04:45 -07001693 return -ENODEV;
1694
Rodrigo Vivida46f932014-08-01 02:04:45 -07001695 *val = dev_priv->fbc.false_color;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001696
1697 return 0;
1698}
1699
Ville Syrjälä4127dc42017-06-06 15:44:12 +03001700static int i915_fbc_false_color_set(void *data, u64 val)
Rodrigo Vivida46f932014-08-01 02:04:45 -07001701{
David Weinehall36cdd012016-08-22 13:59:31 +03001702 struct drm_i915_private *dev_priv = data;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001703 u32 reg;
1704
David Weinehall36cdd012016-08-22 13:59:31 +03001705 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
Rodrigo Vivida46f932014-08-01 02:04:45 -07001706 return -ENODEV;
1707
Paulo Zanoni25ad93f2015-07-02 19:25:10 -03001708 mutex_lock(&dev_priv->fbc.lock);
Rodrigo Vivida46f932014-08-01 02:04:45 -07001709
1710 reg = I915_READ(ILK_DPFC_CONTROL);
1711 dev_priv->fbc.false_color = val;
1712
1713 I915_WRITE(ILK_DPFC_CONTROL, val ?
1714 (reg | FBC_CTL_FALSE_COLOR) :
1715 (reg & ~FBC_CTL_FALSE_COLOR));
1716
Paulo Zanoni25ad93f2015-07-02 19:25:10 -03001717 mutex_unlock(&dev_priv->fbc.lock);
Rodrigo Vivida46f932014-08-01 02:04:45 -07001718 return 0;
1719}
1720
/* debugfs file operations for the FBC false-color u64 attribute. */
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");
1724
Paulo Zanoni92d44622013-05-31 16:33:24 -03001725static int i915_ips_status(struct seq_file *m, void *unused)
1726{
David Weinehall36cdd012016-08-22 13:59:31 +03001727 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Paulo Zanoni92d44622013-05-31 16:33:24 -03001728
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00001729 if (!HAS_IPS(dev_priv))
1730 return -ENODEV;
Paulo Zanoni92d44622013-05-31 16:33:24 -03001731
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001732 intel_runtime_pm_get(dev_priv);
1733
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001734 seq_printf(m, "Enabled by kernel parameter: %s\n",
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00001735 yesno(i915_modparams.enable_ips));
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001736
David Weinehall36cdd012016-08-22 13:59:31 +03001737 if (INTEL_GEN(dev_priv) >= 8) {
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001738 seq_puts(m, "Currently: unknown\n");
1739 } else {
1740 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1741 seq_puts(m, "Currently: enabled\n");
1742 else
1743 seq_puts(m, "Currently: disabled\n");
1744 }
Paulo Zanoni92d44622013-05-31 16:33:24 -03001745
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001746 intel_runtime_pm_put(dev_priv);
1747
Paulo Zanoni92d44622013-05-31 16:33:24 -03001748 return 0;
1749}
1750
/*
 * debugfs entry: report whether panel self-refresh is enabled, reading the
 * platform-appropriate status register. Gen9+ has no single global SR bit,
 * so nothing is sampled there and "disabled" is reported.
 */
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	bool sr_enabled = false;

	/* Need both a wakeref and display power to read these registers. */
	intel_runtime_pm_get(dev_priv);
	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	/* Order matters: most-specific/newest platform checks come first. */
	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
1780
Jesse Barnes7648fa92010-05-20 14:28:11 -07001781static int i915_emon_status(struct seq_file *m, void *unused)
1782{
David Weinehall36cdd012016-08-22 13:59:31 +03001783 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1784 struct drm_device *dev = &dev_priv->drm;
Jesse Barnes7648fa92010-05-20 14:28:11 -07001785 unsigned long temp, chipset, gfx;
Chris Wilsonde227ef2010-07-03 07:58:38 +01001786 int ret;
1787
David Weinehall36cdd012016-08-22 13:59:31 +03001788 if (!IS_GEN5(dev_priv))
Chris Wilson582be6b2012-04-30 19:35:02 +01001789 return -ENODEV;
1790
Chris Wilsonde227ef2010-07-03 07:58:38 +01001791 ret = mutex_lock_interruptible(&dev->struct_mutex);
1792 if (ret)
1793 return ret;
Jesse Barnes7648fa92010-05-20 14:28:11 -07001794
1795 temp = i915_mch_val(dev_priv);
1796 chipset = i915_chipset_val(dev_priv);
1797 gfx = i915_gfx_val(dev_priv);
Chris Wilsonde227ef2010-07-03 07:58:38 +01001798 mutex_unlock(&dev->struct_mutex);
Jesse Barnes7648fa92010-05-20 14:28:11 -07001799
1800 seq_printf(m, "GMCH temp: %ld\n", temp);
1801 seq_printf(m, "Chipset power: %ld\n", chipset);
1802 seq_printf(m, "GFX power: %ld\n", gfx);
1803 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1804
1805 return 0;
1806}
1807
/*
 * debugfs entry (LLC platforms): print the GPU/CPU/ring frequency mapping
 * table by querying pcode for each GPU frequency step.
 */
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	int gpu_freq, ia_freq;
	int ret;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		goto out;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		/* pcode takes the GPU freq in and returns the paired IA freq. */
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		/* Undo the 50 MHz scaling for display; IA fields are in 100 MHz units. */
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ||
						      INTEL_GEN(dev_priv) >= 10 ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	/* ret is 0 on success or the mutex_lock_interruptible() error. */
	intel_runtime_pm_put(dev_priv);
	return ret;
}
1855
Chris Wilson44834a62010-08-19 16:09:23 +01001856static int i915_opregion(struct seq_file *m, void *unused)
1857{
David Weinehall36cdd012016-08-22 13:59:31 +03001858 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1859 struct drm_device *dev = &dev_priv->drm;
Chris Wilson44834a62010-08-19 16:09:23 +01001860 struct intel_opregion *opregion = &dev_priv->opregion;
1861 int ret;
1862
1863 ret = mutex_lock_interruptible(&dev->struct_mutex);
1864 if (ret)
Daniel Vetter0d38f002012-04-21 22:49:10 +02001865 goto out;
Chris Wilson44834a62010-08-19 16:09:23 +01001866
Jani Nikula2455a8e2015-12-14 12:50:53 +02001867 if (opregion->header)
1868 seq_write(m, opregion->header, OPREGION_SIZE);
Chris Wilson44834a62010-08-19 16:09:23 +01001869
1870 mutex_unlock(&dev->struct_mutex);
1871
Daniel Vetter0d38f002012-04-21 22:49:10 +02001872out:
Chris Wilson44834a62010-08-19 16:09:23 +01001873 return 0;
1874}
1875
Jani Nikulaada8f952015-12-15 13:17:12 +02001876static int i915_vbt(struct seq_file *m, void *unused)
1877{
David Weinehall36cdd012016-08-22 13:59:31 +03001878 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
Jani Nikulaada8f952015-12-15 13:17:12 +02001879
1880 if (opregion->vbt)
1881 seq_write(m, opregion->vbt, opregion->vbt_size);
1882
1883 return 0;
1884}
1885
/*
 * i915_gem_framebuffer_info - debugfs listing of all framebuffers
 *
 * Prints one line per framebuffer (size, depth, bpp, modifier, refcount)
 * followed by a describe_obj() dump of its backing GEM object.  The fbdev
 * console framebuffer, when compiled in, is printed first and then skipped
 * in the global list so it does not appear twice.
 *
 * Locking: struct_mutex is held across the whole walk (describe_obj needs
 * it); mode_config.fb_lock additionally protects the drm_for_each_fb walk.
 */
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	/* The fbcon framebuffer only exists if fbdev emulation is built in. */
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
		seq_putc(m, '\n');
	}
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
		/* Already printed above as "fbcon"; avoid a duplicate line. */
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, intel_fb_obj(&fb->base));
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1935
/* Print a one-line summary of a context ringbuffer's bookkeeping state. */
static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
		   ring->space, ring->head, ring->tail, ring->emit);
}
1941
/*
 * i915_context_status - debugfs listing of all GEM contexts
 *
 * For every context: its hw_id (when assigned, i.e. on the hw_id list),
 * the owning process (resolved via get_pid_task, a kernel context has no
 * pid, a context whose file was closed shows "(deleted)"), the remap_slice
 * flag ('R'/'r'), and per-engine state/ring dumps.
 *
 * Locking: struct_mutex protects the contexts.list walk and describe_obj.
 */
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		seq_puts(m, "HW context ");
		/* hw_id is only meaningful while on the hw_id_link list. */
		if (!list_empty(&ctx->hw_id_link))
			seq_printf(m, "%x [pin %u]", ctx->hw_id,
				   atomic_read(&ctx->hw_id_pin_count));
		if (ctx->pid) {
			struct task_struct *task;

			/* Takes a task ref; dropped right after printing. */
			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_engine(engine, dev_priv, id) {
			struct intel_context *ce =
				to_intel_context(ctx, engine);

			seq_printf(m, "%s: ", engine->name);
			if (ce->state)
				describe_obj(m, ce->state->obj);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1997
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001998static const char *swizzle_string(unsigned swizzle)
1999{
Damien Lespiauaee56cf2013-06-24 22:59:49 +01002000 switch (swizzle) {
Daniel Vetterea16a3c2011-12-14 13:57:16 +01002001 case I915_BIT_6_SWIZZLE_NONE:
2002 return "none";
2003 case I915_BIT_6_SWIZZLE_9:
2004 return "bit9";
2005 case I915_BIT_6_SWIZZLE_9_10:
2006 return "bit9/bit10";
2007 case I915_BIT_6_SWIZZLE_9_11:
2008 return "bit9/bit11";
2009 case I915_BIT_6_SWIZZLE_9_10_11:
2010 return "bit9/bit10/bit11";
2011 case I915_BIT_6_SWIZZLE_9_17:
2012 return "bit9/bit17";
2013 case I915_BIT_6_SWIZZLE_9_10_17:
2014 return "bit9/bit10/bit17";
2015 case I915_BIT_6_SWIZZLE_UNKNOWN:
Masanari Iida8a168ca2012-12-29 02:00:09 +09002016 return "unknown";
Daniel Vetterea16a3c2011-12-14 13:57:16 +01002017 }
2018
2019 return "bug";
2020}
2021
/*
 * i915_swizzle_info - debugfs dump of bit-6 swizzle configuration
 *
 * Reports the detected X/Y tiling swizzle modes, then the raw hardware
 * registers the detection is based on: DRAM config registers on gen3/4,
 * memory-channel/arbitration registers on gen6+.  The whole register
 * read sequence is bracketed by a runtime-PM reference so the device is
 * guaranteed awake.
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
		/* gen3/4: swizzling derives from the DRAM row/bank config. */
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		/* gen6+: per-channel DIMM config plus arbitration control. */
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		/* The arbiter mode register moved on gen8. */
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2068
/*
 * per_file_ctx - idr_for_each() callback dumping one context's PPGTT
 *
 * @id:   idr slot (unused)
 * @ptr:  the struct i915_gem_context stored in the idr
 * @data: the seq_file, smuggled through the void* cookie
 *
 * Always returns 0 so idr_for_each() visits every context.
 */
static int per_file_ctx(int id, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	struct seq_file *m = data;
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;

	/* A context without a full PPGTT (e.g. aliasing-only setups). */
	if (!ppgtt) {
		seq_printf(m, "  no ppgtt for context %d\n",
			   ctx->user_handle);
		return 0;
	}

	if (i915_gem_context_is_default(ctx))
		seq_puts(m, "  default context:\n");
	else
		seq_printf(m, "  context %d:\n", ctx->user_handle);
	ppgtt->debug_dump(ppgtt, m);

	return 0;
}
2089
David Weinehall36cdd012016-08-22 13:59:31 +03002090static void gen8_ppgtt_info(struct seq_file *m,
2091 struct drm_i915_private *dev_priv)
Daniel Vetter3cf17fc2012-02-09 17:15:49 +01002092{
Ben Widawsky77df6772013-11-02 21:07:30 -07002093 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
Akash Goel3b3f1652016-10-13 22:44:48 +05302094 struct intel_engine_cs *engine;
2095 enum intel_engine_id id;
Dave Gordonb4ac5af2016-03-24 11:20:38 +00002096 int i;
Daniel Vetter3cf17fc2012-02-09 17:15:49 +01002097
Ben Widawsky77df6772013-11-02 21:07:30 -07002098 if (!ppgtt)
2099 return;
Daniel Vetter3cf17fc2012-02-09 17:15:49 +01002100
Akash Goel3b3f1652016-10-13 22:44:48 +05302101 for_each_engine(engine, dev_priv, id) {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002102 seq_printf(m, "%s\n", engine->name);
Ben Widawsky77df6772013-11-02 21:07:30 -07002103 for (i = 0; i < 4; i++) {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002104 u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
Ben Widawsky77df6772013-11-02 21:07:30 -07002105 pdp <<= 32;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002106 pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
Ville Syrjäläa2a5b152014-03-31 18:17:16 +03002107 seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
Ben Widawsky77df6772013-11-02 21:07:30 -07002108 }
2109 }
2110}
2111
/*
 * Dump gen6/gen7 PPGTT state: the global GFX_MODE on gen6 (per-engine
 * RING_MODE on gen7), each engine's page-directory registers, the
 * aliasing PPGTT's page tables when present, and ECOCHK.
 */
static void gen6_ppgtt_info(struct seq_file *m,
			    struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (IS_GEN6(dev_priv))
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s\n", engine->name);
		/* gen7 moved the mode bits into a per-engine register. */
		if (IS_GEN7(dev_priv))
			seq_printf(m, "GFX_MODE: 0x%08x\n",
				   I915_READ(RING_MODE_GEN7(engine)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);

		ppgtt->debug_dump(ppgtt, m);
	}

	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}
2144
/*
 * i915_ppgtt_info - debugfs dump of all per-process GTT state
 *
 * Dispatches to the gen8 or gen6 register dump, then walks every open
 * DRM file and dumps each of its contexts' PPGTTs via per_file_ctx().
 *
 * Lock ordering: filelist_mutex (protects dev->filelist) is taken before
 * struct_mutex; the runtime-PM ref keeps the device awake for the
 * register reads.  The unwind labels must release in exactly the
 * reverse order they were acquired.
 */
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_file *file;
	int ret;

	mutex_lock(&dev->filelist_mutex);
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out_unlock;

	intel_runtime_pm_get(dev_priv);

	if (INTEL_GEN(dev_priv) >= 8)
		gen8_ppgtt_info(m, dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_ppgtt_info(m, dev_priv);

	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		task = get_pid_task(file->pid, PIDTYPE_PID);
		if (!task) {
			/* Owner exited between list walk and lookup. */
			ret = -ESRCH;
			goto out_rpm;
		}
		seq_printf(m, "\nproc: %s\n", task->comm);
		put_task_struct(task);
		idr_for_each(&file_priv->context_idr, per_file_ctx,
			     (void *)(unsigned long)m);
	}

out_rpm:
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
out_unlock:
	mutex_unlock(&dev->filelist_mutex);
	return ret;
}
2186
Chris Wilsonf5a4c672015-04-27 13:41:23 +01002187static int count_irq_waiters(struct drm_i915_private *i915)
2188{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002189 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +05302190 enum intel_engine_id id;
Chris Wilsonf5a4c672015-04-27 13:41:23 +01002191 int count = 0;
Chris Wilsonf5a4c672015-04-27 13:41:23 +01002192
Akash Goel3b3f1652016-10-13 22:44:48 +05302193 for_each_engine(engine, i915, id)
Chris Wilson688e6c72016-07-01 17:23:15 +01002194 count += intel_engine_has_waiter(engine);
Chris Wilsonf5a4c672015-04-27 13:41:23 +01002195
2196 return count;
2197}
2198
Chris Wilson7466c292016-08-15 09:49:33 +01002199static const char *rps_power_to_str(unsigned int power)
2200{
2201 static const char * const strings[] = {
2202 [LOW_POWER] = "low power",
2203 [BETWEEN] = "mixed",
2204 [HIGH_POWER] = "high power",
2205 };
2206
2207 if (power >= ARRAY_SIZE(strings) || !strings[power])
2208 return "unknown";
2209
2210 return strings[power];
2211}
2212
/*
 * i915_rps_boost_info - debugfs summary of RPS (GPU frequency) state
 *
 * Prints the requested vs. actual GPU frequency, the soft/hard frequency
 * limits, per-client boost counts, and (when the GPU is busy) the RPS
 * up/down autotuning counters.
 *
 * The actual frequency is only sampled when the device is already awake
 * (intel_runtime_pm_get_if_in_use) so reading this file never powers up
 * the GPU; otherwise the last requested frequency is shown instead.
 */
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 act_freq = rps->cur_freq;	/* fallback if device is asleep */
	struct drm_file *file;

	if (intel_runtime_pm_get_if_in_use(dev_priv)) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			/* VLV/CHV report the frequency via the punit. */
			mutex_lock(&dev_priv->pcu_lock);
			act_freq = vlv_punit_read(dev_priv,
						  PUNIT_REG_GPU_FREQ_STS);
			act_freq = (act_freq >> 8) & 0xff;
			mutex_unlock(&dev_priv->pcu_lock);
		} else {
			act_freq = intel_get_cagf(dev_priv,
						  I915_READ(GEN6_RPSTAT1));
		}
		intel_runtime_pm_put(dev_priv);
	}

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s [%d requests]\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(dev_priv, rps->cur_freq),
		   intel_gpu_freq(dev_priv, act_freq));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, rps->min_freq),
		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, rps->idle_freq),
		   intel_gpu_freq(dev_priv, rps->efficient_freq),
		   intel_gpu_freq(dev_priv, rps->boost_freq));

	/* Per-client boost statistics; filelist_mutex guards dev->filelist. */
	mutex_lock(&dev->filelist_mutex);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		/* pid_task() needs RCU; no task ref is taken. */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s [%d]: %d boosts\n",
			   task ? task->comm : "<unknown>",
			   task ? task->pid : -1,
			   atomic_read(&file_priv->rps_client.boosts));
		rcu_read_unlock();
	}
	seq_printf(m, "Kernel (anonymous) boosts: %d\n",
		   atomic_read(&rps->boosts));
	mutex_unlock(&dev->filelist_mutex);

	if (INTEL_GEN(dev_priv) >= 6 &&
	    rps->enabled &&
	    dev_priv->gt.active_requests) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		/* _FW reads need an explicit forcewake bracket. */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
2299
Ben Widawsky63573eb2013-07-04 11:02:07 -07002300static int i915_llc(struct seq_file *m, void *data)
2301{
David Weinehall36cdd012016-08-22 13:59:31 +03002302 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Mika Kuoppala3accaf72016-04-13 17:26:43 +03002303 const bool edram = INTEL_GEN(dev_priv) > 8;
Ben Widawsky63573eb2013-07-04 11:02:07 -07002304
David Weinehall36cdd012016-08-22 13:59:31 +03002305 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
Mika Kuoppala3accaf72016-04-13 17:26:43 +03002306 seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2307 intel_uncore_edram_size(dev_priv)/1024/1024);
Ben Widawsky63573eb2013-07-04 11:02:07 -07002308
2309 return 0;
2310}
2311
/*
 * i915_huc_load_status_info - debugfs report of HuC firmware state
 *
 * Dumps the cached firmware-fetch/load bookkeeping, then reads the live
 * HUC_STATUS2 register under a runtime-PM reference.  Returns -ENODEV on
 * hardware without a HuC.
 */
static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_printer p;

	if (!HAS_HUC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->huc.fw, &p);

	/* Only the register read needs the device awake. */
	intel_runtime_pm_get(dev_priv);
	seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
	intel_runtime_pm_put(dev_priv);

	return 0;
}
2329
/*
 * i915_guc_load_status_info - debugfs report of GuC firmware state
 *
 * Dumps the cached firmware-fetch/load bookkeeping, then decodes the live
 * GUC_STATUS register (bootrom / uKernel / MIA core fields) and the 16
 * software scratch registers, all under a runtime-PM reference.
 * Returns -ENODEV on hardware without a GuC.
 */
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_printer p;
	u32 tmp, i;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->guc.fw, &p);

	intel_runtime_pm_get(dev_priv);

	tmp = I915_READ(GUC_STATUS);

	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
	seq_printf(m, "\tBootrom status = 0x%x\n",
		   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
	seq_printf(m, "\tuKernel status = 0x%x\n",
		   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
	seq_printf(m, "\tMIA Core status = 0x%x\n",
		   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
	seq_puts(m, "\nScratch registers:\n");
	for (i = 0; i < 16; i++)
		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2361
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01002362static const char *
2363stringify_guc_log_type(enum guc_log_buffer_type type)
2364{
2365 switch (type) {
2366 case GUC_ISR_LOG_BUFFER:
2367 return "ISR";
2368 case GUC_DPC_LOG_BUFFER:
2369 return "DPC";
2370 case GUC_CRASH_DUMP_LOG_BUFFER:
2371 return "CRASH";
2372 default:
2373 MISSING_CASE(type);
2374 }
2375
2376 return "";
2377}
2378
/*
 * Print GuC log-relay statistics: the relay full count plus per-buffer
 * flush/overflow counters.  Prints a one-line notice and bails out when
 * the log relay is not enabled.
 */
static void i915_guc_log_info(struct seq_file *m,
			      struct drm_i915_private *dev_priv)
{
	struct intel_guc_log *log = &dev_priv->guc.log;
	enum guc_log_buffer_type type;

	if (!intel_guc_log_relay_enabled(log)) {
		seq_puts(m, "GuC log relay disabled\n");
		return;
	}

	seq_puts(m, "GuC logging stats:\n");

	seq_printf(m, "\tRelay full count: %u\n",
		   log->relay.full_count);

	/* One stats line per log buffer type (ISR, DPC, CRASH). */
	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
		seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
			   stringify_guc_log_type(type),
			   log->stats[type].flush,
			   log->stats[type].sampled_overflow);
	}
}
2402
/*
 * Print one GuC client's identity (priority, stage index, process
 * descriptor offset, doorbell) followed by its per-engine submission
 * counts and the overall total.
 */
static void i915_guc_client_info(struct seq_file *m,
				 struct drm_i915_private *dev_priv,
				 struct intel_guc_client *client)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	uint64_t tot = 0;

	seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
		   client->priority, client->stage_id, client->proc_desc_offset);
	seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
		   client->doorbell_id, client->doorbell_offset);

	for_each_engine(engine, dev_priv, id) {
		u64 submissions = client->submissions[id];
		tot += submissions;
		seq_printf(m, "\tSubmissions: %llu %s\n",
			   submissions, engine->name);
	}
	seq_printf(m, "\tTotal: %llu\n", tot);
}
2424
2425static int i915_guc_info(struct seq_file *m, void *data)
2426{
David Weinehall36cdd012016-08-22 13:59:31 +03002427 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilson334636c2016-11-29 12:10:20 +00002428 const struct intel_guc *guc = &dev_priv->guc;
Dave Gordon8b417c22015-08-12 15:43:44 +01002429
Michał Winiarskidb557992018-03-19 10:53:43 +01002430 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002431 return -ENODEV;
2432
Michał Winiarskidb557992018-03-19 10:53:43 +01002433 i915_guc_log_info(m, dev_priv);
2434
2435 if (!USES_GUC_SUBMISSION(dev_priv))
2436 return 0;
2437
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002438 GEM_BUG_ON(!guc->execbuf_client);
Dave Gordon8b417c22015-08-12 15:43:44 +01002439
Michał Winiarskidb557992018-03-19 10:53:43 +01002440 seq_printf(m, "\nDoorbell map:\n");
Joonas Lahtinenabddffd2017-03-22 10:39:44 -07002441 seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
Michał Winiarskidb557992018-03-19 10:53:43 +01002442 seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
Dave Gordon9636f6d2016-06-13 17:57:28 +01002443
Chris Wilson334636c2016-11-29 12:10:20 +00002444 seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2445 i915_guc_client_info(m, dev_priv, guc->execbuf_client);
Chris Wilsone78c9172018-02-07 21:05:42 +00002446 if (guc->preempt_client) {
2447 seq_printf(m, "\nGuC preempt client @ %p:\n",
2448 guc->preempt_client);
2449 i915_guc_client_info(m, dev_priv, guc->preempt_client);
2450 }
Dave Gordon8b417c22015-08-12 15:43:44 +01002451
2452 /* Add more as required ... */
2453
2454 return 0;
2455}
2456
/*
 * i915_guc_stage_pool - debugfs dump of the GuC stage-descriptor pool
 *
 * Walks all GUC_MAX_STAGE_DESCRIPTORS entries in the CPU-mapped pool,
 * skipping inactive ones, and prints each active descriptor's fields
 * plus the per-engine execlist context (LRC) entries for the engines the
 * execbuf client uses.  Returns -ENODEV unless GuC submission is enabled.
 */
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;
	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
	struct intel_guc_client *client = guc->execbuf_client;
	unsigned int tmp;
	int index;

	if (!USES_GUC_SUBMISSION(dev_priv))
		return -ENODEV;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		/* Only active descriptors are interesting. */
		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		/* Dump the LRC slot for each engine this client submits to. */
		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
				&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}
2510
/*
 * i915_guc_log_dump - debugfs hex dump of a GuC log buffer.
 *
 * Dumps either the regular GuC log buffer or, when the debugfs node's
 * info_ent->data is non-NULL, the log captured from a failed GuC load.
 * The backing object is mapped write-combined and printed four 32-bit
 * words per line.
 *
 * Returns 0 (including when no log object exists yet), -ENODEV without
 * GuC hardware, or the error from pinning the object's pages.
 */
static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	bool dump_load_err = !!node->info_ent->data;
	struct drm_i915_gem_object *obj = NULL;
	u32 *log;
	int i = 0;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	/* Pick the load-error capture or the live log buffer. */
	if (dump_load_err)
		obj = dev_priv->guc.load_err_log;
	else if (dev_priv->guc.log.vma)
		obj = dev_priv->guc.log.vma->obj;

	if (!obj)
		return 0;

	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(log)) {
		DRM_DEBUG("Failed to pin object\n");
		seq_puts(m, "(log data unaccessible)\n");
		return PTR_ERR(log);
	}

	/*
	 * NOTE(review): the last row reads i+1..i+3; assumes obj->base.size
	 * is a multiple of 16 bytes - confirm against the log allocation.
	 */
	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			   *(log + i), *(log + i + 1),
			   *(log + i + 2), *(log + i + 3));

	seq_putc(m, '\n');

	i915_gem_object_unpin_map(obj);

	return 0;
}
2549
Michał Winiarski4977a282018-03-19 10:53:40 +01002550static int i915_guc_log_level_get(void *data, u64 *val)
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302551{
Chris Wilsonbcc36d82017-04-07 20:42:20 +01002552 struct drm_i915_private *dev_priv = data;
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302553
Michał Winiarski86aa8242018-03-08 16:46:53 +01002554 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002555 return -ENODEV;
2556
Piotr Piórkowski50935ac2018-06-04 16:19:41 +02002557 *val = intel_guc_log_get_level(&dev_priv->guc.log);
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302558
2559 return 0;
2560}
2561
Michał Winiarski4977a282018-03-19 10:53:40 +01002562static int i915_guc_log_level_set(void *data, u64 val)
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302563{
Chris Wilsonbcc36d82017-04-07 20:42:20 +01002564 struct drm_i915_private *dev_priv = data;
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302565
Michał Winiarski86aa8242018-03-08 16:46:53 +01002566 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002567 return -ENODEV;
2568
Piotr Piórkowski50935ac2018-06-04 16:19:41 +02002569 return intel_guc_log_set_level(&dev_priv->guc.log, val);
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302570}
2571
/* i915_guc_log_level: read/write attribute, value printed/parsed as %lld. */
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
			i915_guc_log_level_get, i915_guc_log_level_set,
			"%lld\n");
2575
Michał Winiarski4977a282018-03-19 10:53:40 +01002576static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2577{
2578 struct drm_i915_private *dev_priv = inode->i_private;
2579
2580 if (!USES_GUC(dev_priv))
2581 return -ENODEV;
2582
2583 file->private_data = &dev_priv->guc.log;
2584
2585 return intel_guc_log_relay_open(&dev_priv->guc.log);
2586}
2587
2588static ssize_t
2589i915_guc_log_relay_write(struct file *filp,
2590 const char __user *ubuf,
2591 size_t cnt,
2592 loff_t *ppos)
2593{
2594 struct intel_guc_log *log = filp->private_data;
2595
2596 intel_guc_log_relay_flush(log);
2597
2598 return cnt;
2599}
2600
2601static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2602{
2603 struct drm_i915_private *dev_priv = inode->i_private;
2604
2605 intel_guc_log_relay_close(&dev_priv->guc.log);
2606
2607 return 0;
2608}
2609
/* debugfs file operations for the GuC log relay node. */
static const struct file_operations i915_guc_log_relay_fops = {
	.owner = THIS_MODULE,
	.open = i915_guc_log_relay_open,
	.write = i915_guc_log_relay_write,
	.release = i915_guc_log_relay_release,
};
2616
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002617static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2618{
2619 u8 val;
2620 static const char * const sink_status[] = {
2621 "inactive",
2622 "transition to active, capture and display",
2623 "active, display from RFB",
2624 "active, capture and display on sink device timings",
2625 "transition to inactive, capture and display, timing re-sync",
2626 "reserved",
2627 "reserved",
2628 "sink internal error",
2629 };
2630 struct drm_connector *connector = m->private;
Rodrigo Vivi7a72c782018-07-19 17:31:55 -07002631 struct drm_i915_private *dev_priv = to_i915(connector->dev);
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002632 struct intel_dp *intel_dp =
2633 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
Rodrigo Vivi7a72c782018-07-19 17:31:55 -07002634 int ret;
2635
2636 if (!CAN_PSR(dev_priv)) {
2637 seq_puts(m, "PSR Unsupported\n");
2638 return -ENODEV;
2639 }
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002640
2641 if (connector->status != connector_status_connected)
2642 return -ENODEV;
2643
Rodrigo Vivi7a72c782018-07-19 17:31:55 -07002644 ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2645
2646 if (ret == 1) {
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002647 const char *str = "unknown";
2648
2649 val &= DP_PSR_SINK_STATE_MASK;
2650 if (val < ARRAY_SIZE(sink_status))
2651 str = sink_status[val];
2652 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2653 } else {
Rodrigo Vivi7a72c782018-07-19 17:31:55 -07002654 return ret;
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002655 }
2656
2657 return 0;
2658}
2659DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2660
/*
 * psr_source_status - print the source (hardware) PSR state machine state.
 *
 * Reads EDP_PSR2_STATUS or EDP_PSR_STATUS depending on whether PSR2 is
 * enabled and decodes the live state field via the matching name table.
 * State encodings outside either table fall through to an "unknown" line.
 */
static void
psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
{
	u32 val, psr_status;

	if (dev_priv->psr.psr2_enabled) {
		/* PSR2 state names, indexed by the hardware state field. */
		static const char * const live_status[] = {
			"IDLE",
			"CAPTURE",
			"CAPTURE_FS",
			"SLEEP",
			"BUFON_FW",
			"ML_UP",
			"SU_STANDBY",
			"FAST_SLEEP",
			"DEEP_SLEEP",
			"BUF_ON",
			"TG_ON"
		};
		psr_status = I915_READ(EDP_PSR2_STATUS);
		val = (psr_status & EDP_PSR2_STATUS_STATE_MASK) >>
			EDP_PSR2_STATUS_STATE_SHIFT;
		if (val < ARRAY_SIZE(live_status)) {
			seq_printf(m, "Source PSR status: 0x%x [%s]\n",
				   psr_status, live_status[val]);
			return;
		}
	} else {
		/* PSR1 state names, indexed by the hardware state field. */
		static const char * const live_status[] = {
			"IDLE",
			"SRDONACK",
			"SRDENT",
			"BUFOFF",
			"BUFON",
			"AUXACK",
			"SRDOFFACK",
			"SRDENT_ON",
		};
		psr_status = I915_READ(EDP_PSR_STATUS);
		val = (psr_status & EDP_PSR_STATUS_STATE_MASK) >>
			EDP_PSR_STATUS_STATE_SHIFT;
		if (val < ARRAY_SIZE(live_status)) {
			seq_printf(m, "Source PSR status: 0x%x [%s]\n",
				   psr_status, live_status[val]);
			return;
		}
	}

	seq_printf(m, "Source PSR status: 0x%x [%s]\n", psr_status, "unknown");
}
2711
/*
 * i915_edp_psr_status - debugfs summary of eDP PSR software and HW state.
 *
 * Prints the software bookkeeping (mode, enabled flag, busy frontbuffer
 * bits, link standby) under psr.lock, the hardware enable bit from the
 * PSR1/PSR2 control register, the HSW/BDW performance counter, the source
 * state machine status, and - when IRQ debugging is on - entry/exit
 * timestamps.
 *
 * Returns 0, or -ENODEV when the platform has no PSR support.
 */
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 psrperf = 0;
	bool enabled = false;
	bool sink_support;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	/* Without a PSR-capable sink there is nothing further to report. */
	sink_support = dev_priv->psr.sink_support;
	seq_printf(m, "Sink_Support: %s\n", yesno(sink_support));
	if (!sink_support)
		return 0;

	/* Registers below need the device awake. */
	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->psr.lock);
	seq_printf(m, "PSR mode: %s\n",
		   dev_priv->psr.psr2_enabled ? "PSR2" : "PSR1");
	seq_printf(m, "Enabled: %s\n", yesno(dev_priv->psr.enabled));
	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
		   dev_priv->psr.busy_frontbuffer_bits);

	/* Hardware view: enable bit of the active PSR control register. */
	if (dev_priv->psr.psr2_enabled)
		enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
	else
		enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;

	seq_printf(m, "Main link in standby mode: %s\n",
		   yesno(dev_priv->psr.link_standby));

	seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));

	/*
	 * SKL+ Perf counter is reset to 0 everytime DC state is entered,
	 * so only report it on HSW/BDW where it stays meaningful.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
			EDP_PSR_PERF_CNT_MASK;

		seq_printf(m, "Performance_Counter: %u\n", psrperf);
	}

	psr_source_status(dev_priv, m);
	mutex_unlock(&dev_priv->psr.lock);

	/* IRQ-debug timestamps are published outside the lock. */
	if (READ_ONCE(dev_priv->psr.debug) & I915_PSR_DEBUG_IRQ) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   dev_priv->psr.last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n",
			   dev_priv->psr.last_exit);
	}

	intel_runtime_pm_put(dev_priv);
	return 0;
}
2769
/*
 * i915_edp_psr_debug_set - debugfs write hook for the PSR debug value.
 *
 * Applying the new value may require modeset locks, so this uses the
 * standard drm_modeset_acquire_ctx dance: retry on -EDEADLK after
 * drm_modeset_backoff() until the locks are acquired or a real error
 * occurs.
 *
 * Returns 0 on success, -ENODEV without PSR support, or the error from
 * intel_psr_set_debugfs_mode()/drm_modeset_backoff().
 */
static int
i915_edp_psr_debug_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (!CAN_PSR(dev_priv))
		return -ENODEV;

	DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);

	/* Keep the device awake while poking display state. */
	intel_runtime_pm_get(dev_priv);

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

retry:
	ret = intel_psr_set_debugfs_mode(dev_priv, &ctx, val);
	if (ret == -EDEADLK) {
		/* Deadlock: drop the locks, wait our turn, try again. */
		ret = drm_modeset_backoff(&ctx);
		if (!ret)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	intel_runtime_pm_put(dev_priv);

	return ret;
}
2801
2802static int
2803i915_edp_psr_debug_get(void *data, u64 *val)
2804{
2805 struct drm_i915_private *dev_priv = data;
2806
2807 if (!CAN_PSR(dev_priv))
2808 return -ENODEV;
2809
2810 *val = READ_ONCE(dev_priv->psr.debug);
2811 return 0;
2812}
2813
/* i915_edp_psr_debug: read/write attribute, value printed/parsed as %llu. */
DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");
2817
/*
 * i915_energy_uJ - report accumulated GPU energy use in microjoules.
 *
 * Reads the RAPL energy unit exponent from MSR_RAPL_POWER_UNIT and uses
 * it to scale the MCH_SECP_NRG_STTS counter into uJ.
 *
 * Returns 0 on success, -ENODEV on gen < 6 or when the MSR read fails.
 */
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	unsigned long long power;
	u32 units;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
		/* Drop the wakeref taken above before bailing out. */
		intel_runtime_pm_put(dev_priv);
		return -ENODEV;
	}

	/* Bits 12:8 of the RAPL power-unit MSR: energy unit exponent. */
	units = (power & 0x1f00) >> 8;
	power = I915_READ(MCH_SECP_NRG_STTS);
	power = (1000000 * power) >> units; /* convert to uJ */

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "%llu", power);

	return 0;
}
2844
/*
 * i915_runtime_pm_status - debugfs summary of runtime power management.
 *
 * Prints GPU idle state, IRQ enable state, the runtime-PM usage count
 * (only when the kernel was built with CONFIG_PM), and the PCI device
 * power state. On platforms without runtime PM only a notice is printed
 * first; the remaining information is still dumped.
 */
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "GPU idle: %s (epoch %u)\n",
		   yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	return 0;
}
2869
Imre Deak1da51582013-11-25 17:15:35 +02002870static int i915_power_domain_info(struct seq_file *m, void *unused)
2871{
David Weinehall36cdd012016-08-22 13:59:31 +03002872 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Imre Deak1da51582013-11-25 17:15:35 +02002873 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2874 int i;
2875
2876 mutex_lock(&power_domains->lock);
2877
2878 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2879 for (i = 0; i < power_domains->power_well_count; i++) {
2880 struct i915_power_well *power_well;
2881 enum intel_display_power_domain power_domain;
2882
2883 power_well = &power_domains->power_wells[i];
Imre Deakf28ec6f2018-08-06 12:58:37 +03002884 seq_printf(m, "%-25s %d\n", power_well->desc->name,
Imre Deak1da51582013-11-25 17:15:35 +02002885 power_well->count);
2886
Imre Deakf28ec6f2018-08-06 12:58:37 +03002887 for_each_power_domain(power_domain, power_well->desc->domains)
Imre Deak1da51582013-11-25 17:15:35 +02002888 seq_printf(m, " %-23s %d\n",
Daniel Stone9895ad02015-11-20 15:55:33 +00002889 intel_display_power_domain_str(power_domain),
Imre Deak1da51582013-11-25 17:15:35 +02002890 power_domains->domain_use_count[power_domain]);
Imre Deak1da51582013-11-25 17:15:35 +02002891 }
2892
2893 mutex_unlock(&power_domains->lock);
2894
2895 return 0;
2896}
2897
/*
 * i915_dmc_info - debugfs report of DMC (CSR) firmware state.
 *
 * Prints whether the firmware payload is loaded and from which path;
 * when loaded, also the firmware version and platform-specific DC-state
 * transition counters, and always the program/ssp/htp registers.
 *
 * Returns 0, or -ENODEV on platforms without CSR firmware support.
 */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv))
		return -ENODEV;

	csr = &dev_priv->csr;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	/* No payload: skip version/counters but still dump the registers. */
	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	/* DC-state transition counters live at different offsets per gen. */
	if (IS_BROXTON(dev_priv)) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
	} else if (IS_GEN(dev_priv, 9, 11)) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
	}

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2938
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002939static void intel_seq_print_mode(struct seq_file *m, int tabs,
2940 struct drm_display_mode *mode)
2941{
2942 int i;
2943
2944 for (i = 0; i < tabs; i++)
2945 seq_putc(m, '\t');
2946
2947 seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2948 mode->base.id, mode->name,
2949 mode->vrefresh, mode->clock,
2950 mode->hdisplay, mode->hsync_start,
2951 mode->hsync_end, mode->htotal,
2952 mode->vdisplay, mode->vsync_start,
2953 mode->vsync_end, mode->vtotal,
2954 mode->type, mode->flags);
2955}
2956
/*
 * intel_encoder_info - print one encoder and the connectors attached to it.
 *
 * For each connector on the encoder, prints its id/type/status; connected
 * connectors additionally get the CRTC's current mode printed.
 */
static void intel_encoder_info(struct seq_file *m,
			       struct intel_crtc *intel_crtc,
			       struct intel_encoder *intel_encoder)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_connector *intel_connector;
	struct drm_encoder *encoder;

	encoder = &intel_encoder->base;
	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
		   encoder->base.id, encoder->name);
	for_each_connector_on_encoder(dev, encoder, intel_connector) {
		struct drm_connector *connector = &intel_connector->base;
		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
			   connector->base.id,
			   connector->name,
			   drm_get_connector_status_name(connector->status));
		if (connector->status == connector_status_connected) {
			/* Note: prints the CRTC mode, not a connector mode. */
			struct drm_display_mode *mode = &crtc->mode;
			seq_printf(m, ", mode:\n");
			intel_seq_print_mode(m, 2, mode);
		} else {
			seq_putc(m, '\n');
		}
	}
}
2985
2986static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2987{
David Weinehall36cdd012016-08-22 13:59:31 +03002988 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2989 struct drm_device *dev = &dev_priv->drm;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002990 struct drm_crtc *crtc = &intel_crtc->base;
2991 struct intel_encoder *intel_encoder;
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02002992 struct drm_plane_state *plane_state = crtc->primary->state;
2993 struct drm_framebuffer *fb = plane_state->fb;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002994
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02002995 if (fb)
Matt Roper5aa8a932014-06-16 10:12:55 -07002996 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02002997 fb->base.id, plane_state->src_x >> 16,
2998 plane_state->src_y >> 16, fb->width, fb->height);
Matt Roper5aa8a932014-06-16 10:12:55 -07002999 else
3000 seq_puts(m, "\tprimary plane disabled\n");
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003001 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
3002 intel_encoder_info(m, intel_crtc, intel_encoder);
3003}
3004
3005static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
3006{
3007 struct drm_display_mode *mode = panel->fixed_mode;
3008
3009 seq_printf(m, "\tfixed mode:\n");
3010 intel_seq_print_mode(m, 2, mode);
3011}
3012
/*
 * intel_dp_info - print DP-specific connector details.
 *
 * Dumps the cached DPCD revision and audio capability, the fixed panel
 * mode for eDP connectors, then delegates to drm_dp_downstream_debug()
 * for the downstream port description.
 */
static void intel_dp_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_info(m, &intel_connector->panel);

	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
				&intel_dp->aux);
}
3027
/*
 * intel_dp_mst_info - print MST-specific connector details.
 *
 * Audio capability comes from the MST port via the primary digital
 * port's topology manager rather than the cached DPCD.
 */
static void intel_dp_mst_info(struct seq_file *m,
			      struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp_mst_encoder *intel_mst =
		enc_to_mst(&intel_encoder->base);
	struct intel_digital_port *intel_dig_port = intel_mst->primary;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
						   intel_connector->port);

	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
}
3041
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003042static void intel_hdmi_info(struct seq_file *m,
3043 struct intel_connector *intel_connector)
3044{
3045 struct intel_encoder *intel_encoder = intel_connector->encoder;
3046 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
3047
Jani Nikula742f4912015-09-03 11:16:09 +03003048 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003049}
3050
/* LVDS connectors: only the fixed panel mode is of interest. */
static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
3056
/*
 * intel_connector_info - print everything known about one connector.
 *
 * Always prints id/type/status; for connected connectors also the display
 * info, a type-specific detail section (DP/MST, LVDS or HDMI), and the
 * connector's probed mode list.
 */
static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));

	/* Nothing more to say about a disconnected connector. */
	if (connector->status == connector_status_disconnected)
		return;

	seq_printf(m, "\tname: %s\n", connector->display_info.name);
	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
		   connector->display_info.width_mm,
		   connector->display_info.height_mm);
	seq_printf(m, "\tsubpixel order: %s\n",
		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);

	/* Without an attached encoder the detail sections cannot be built. */
	if (!intel_encoder)
		return;

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
			intel_dp_mst_info(m, intel_connector);
		else
			intel_dp_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
		    intel_encoder->type == INTEL_OUTPUT_DDI)
			intel_hdmi_info(m, intel_connector);
		break;
	default:
		break;
	}

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}
3107
/* Map a drm_plane_type to a three-letter tag for the plane dump. */
static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}
3125
/*
 * Format a plane rotation bitmask into a human readable string.
 *
 * NOTE(review): returns a pointer to a function-local static buffer, so
 * this is not reentrant; presumably acceptable because each call's result
 * is consumed immediately by the single debugfs printer - confirm no
 * concurrent callers exist.
 */
static const char *plane_rotation(unsigned int rotation)
{
	static char buf[48];
	/*
	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused
	 */
	snprintf(buf, sizeof(buf),
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
		 rotation);

	return buf;
}
3145
/*
 * intel_plane_info - print the state of every plane on the given CRTC.
 *
 * For each plane: id, type tag, CRTC position/size, source position/size
 * (16.16 fixed point - the fraction is scaled by 10^6/2^16, i.e.
 * (frac * 15625) >> 10, for decimal display), pixel format and rotation.
 */
static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_plane *intel_plane;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane_state *state;
		struct drm_plane *plane = &intel_plane->base;
		struct drm_format_name_buf format_name;

		if (!plane->state) {
			seq_puts(m, "plane->state is NULL!\n");
			continue;
		}

		state = plane->state;

		/* Planes without a framebuffer get "N/A" as format. */
		if (state->fb) {
			drm_get_format_name(state->fb->format->format,
					    &format_name);
		} else {
			sprintf(format_name.str, "N/A");
		}

		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
			   plane->base.id,
			   plane_type(intel_plane->base.type),
			   state->crtc_x, state->crtc_y,
			   state->crtc_w, state->crtc_h,
			   (state->src_x >> 16),
			   ((state->src_x & 0xffff) * 15625) >> 10,
			   (state->src_y >> 16),
			   ((state->src_y & 0xffff) * 15625) >> 10,
			   (state->src_w >> 16),
			   ((state->src_w & 0xffff) * 15625) >> 10,
			   (state->src_h >> 16),
			   ((state->src_h & 0xffff) * 15625) >> 10,
			   format_name.str,
			   plane_rotation(state->rotation));
	}
}
3188
3189static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3190{
3191 struct intel_crtc_state *pipe_config;
3192 int num_scalers = intel_crtc->num_scalers;
3193 int i;
3194
3195 pipe_config = to_intel_crtc_state(intel_crtc->base.state);
3196
3197 /* Not all platformas have a scaler */
3198 if (num_scalers) {
3199 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
3200 num_scalers,
3201 pipe_config->scaler_state.scaler_users,
3202 pipe_config->scaler_state.scaler_id);
3203
A.Sunil Kamath58415912016-11-20 23:20:26 +05303204 for (i = 0; i < num_scalers; i++) {
Robert Fekete3abc4e02015-10-27 16:58:32 +01003205 struct intel_scaler *sc =
3206 &pipe_config->scaler_state.scalers[i];
3207
3208 seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
3209 i, yesno(sc->in_use), sc->mode);
3210 }
3211 seq_puts(m, "\n");
3212 } else {
3213 seq_puts(m, "\tNo scalers available on this platform\n");
3214 }
3215}
3216
/*
 * i915_display_info - debugfs dump of all CRTC and connector state
 *
 * Walks every CRTC (printing active/size/dither/bpp, and for active pipes
 * the cursor, scaler and plane details) and then every connector.  A
 * runtime PM wakeref is held for the whole dump; each CRTC is sampled
 * under its own modeset lock, and the connector list is walked under
 * mode_config.mutex.
 */
static int i915_display_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct intel_crtc *crtc;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;

        /* Keep the device awake while we poke at display state. */
        intel_runtime_pm_get(dev_priv);
        seq_printf(m, "CRTC info\n");
        seq_printf(m, "---------\n");
        for_each_intel_crtc(dev, crtc) {
                struct intel_crtc_state *pipe_config;

                /* Serialize against a concurrent modeset on this CRTC. */
                drm_modeset_lock(&crtc->base.mutex, NULL);
                pipe_config = to_intel_crtc_state(crtc->base.state);

                seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
                           crtc->base.base.id, pipe_name(crtc->pipe),
                           yesno(pipe_config->base.active),
                           pipe_config->pipe_src_w, pipe_config->pipe_src_h,
                           yesno(pipe_config->dither), pipe_config->pipe_bpp);

                if (pipe_config->base.active) {
                        struct intel_plane *cursor =
                                to_intel_plane(crtc->base.cursor);

                        intel_crtc_info(m, crtc);

                        seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
                                   yesno(cursor->base.state->visible),
                                   cursor->base.state->crtc_x,
                                   cursor->base.state->crtc_y,
                                   cursor->base.state->crtc_w,
                                   cursor->base.state->crtc_h,
                                   cursor->cursor.base);
                        intel_scaler_info(m, crtc);
                        intel_plane_info(m, crtc);
                }

                /* Underrun flags are "disabled" internally; print enabled. */
                seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
                           yesno(!crtc->cpu_fifo_underrun_disabled),
                           yesno(!crtc->pch_fifo_underrun_disabled));
                drm_modeset_unlock(&crtc->base.mutex);
        }

        seq_printf(m, "\n");
        seq_printf(m, "Connector info\n");
        seq_printf(m, "--------------\n");
        mutex_lock(&dev->mode_config.mutex);
        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter)
                intel_connector_info(m, connector);
        drm_connector_list_iter_end(&conn_iter);
        mutex_unlock(&dev->mode_config.mutex);

        intel_runtime_pm_put(dev_priv);

        return 0;
}
3277
/*
 * i915_engine_info - debugfs summary of per-engine GPU state
 *
 * Prints global GT bookkeeping (awake state and epoch, number of active
 * requests, command-streamer timestamp frequency) followed by a detailed
 * dump of every engine via intel_engine_dump().  A runtime PM reference
 * is held across the dump.
 */
static int i915_engine_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        struct drm_printer p;

        intel_runtime_pm_get(dev_priv);

        seq_printf(m, "GT awake? %s (epoch %u)\n",
                   yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
        seq_printf(m, "Global active requests: %d\n",
                   dev_priv->gt.active_requests);
        seq_printf(m, "CS timestamp frequency: %u kHz\n",
                   dev_priv->info.cs_timestamp_frequency_khz);

        /* Route the engine dumper's drm_printer output into this seq_file. */
        p = drm_seq_file_printer(m);
        for_each_engine(engine, dev_priv, id)
                intel_engine_dump(engine, &p, "%s\n", engine->name);

        intel_runtime_pm_put(dev_priv);

        return 0;
}
3302
Lionel Landwerlin79e9cd52018-03-06 12:28:54 +00003303static int i915_rcs_topology(struct seq_file *m, void *unused)
3304{
3305 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3306 struct drm_printer p = drm_seq_file_printer(m);
3307
3308 intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p);
3309
3310 return 0;
3311}
3312
Chris Wilsonc5418a82017-10-13 21:26:19 +01003313static int i915_shrinker_info(struct seq_file *m, void *unused)
3314{
3315 struct drm_i915_private *i915 = node_to_i915(m->private);
3316
3317 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3318 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3319
3320 return 0;
3321}
3322
/*
 * i915_shared_dplls_info - dump software state of all shared DPLLs
 *
 * For each shared DPLL prints which CRTCs reference it, whether it is
 * active/on, and the cached ("tracked") hardware register values from the
 * last readout/commit.  Runs under the global modeset locks so the state
 * cannot change while it is printed.
 *
 * NOTE(review): the cfgcr*/mg_* fields are only programmed on newer
 * platforms; on older ones the cached values are simply printed as-is.
 */
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        int i;

        drm_modeset_lock_all(dev);
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

                seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
                           pll->info->id);
                seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
                           pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
                seq_printf(m, " tracked hardware state:\n");
                seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
                seq_printf(m, " dpll_md: 0x%08x\n",
                           pll->state.hw_state.dpll_md);
                seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
                seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
                seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
                seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
                seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
                seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
                           pll->state.hw_state.mg_refclkin_ctl);
                seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
                           pll->state.hw_state.mg_clktop2_coreclkctl1);
                seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
                           pll->state.hw_state.mg_clktop2_hsclkctl);
                seq_printf(m, " mg_pll_div0: 0x%08x\n",
                           pll->state.hw_state.mg_pll_div0);
                seq_printf(m, " mg_pll_div1: 0x%08x\n",
                           pll->state.hw_state.mg_pll_div1);
                seq_printf(m, " mg_pll_lf: 0x%08x\n",
                           pll->state.hw_state.mg_pll_lf);
                seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
                           pll->state.hw_state.mg_pll_frac_lock);
                seq_printf(m, " mg_pll_ssc: 0x%08x\n",
                           pll->state.hw_state.mg_pll_ssc);
                seq_printf(m, " mg_pll_bias: 0x%08x\n",
                           pll->state.hw_state.mg_pll_bias);
                seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
                           pll->state.hw_state.mg_pll_tdc_coldst_bias);
        }
        drm_modeset_unlock_all(dev);

        return 0;
}
3371
Damien Lespiau1ed1ef92014-08-30 16:50:59 +01003372static int i915_wa_registers(struct seq_file *m, void *unused)
Arun Siluvery888b5992014-08-26 14:44:51 +01003373{
Chris Wilson548764b2018-06-15 13:02:07 +01003374 struct i915_workarounds *wa = &node_to_i915(m->private)->workarounds;
Chris Wilsonf4ecfbf2018-04-14 13:27:54 +01003375 int i;
Arun Siluvery888b5992014-08-26 14:44:51 +01003376
Chris Wilson548764b2018-06-15 13:02:07 +01003377 seq_printf(m, "Workarounds applied: %d\n", wa->count);
3378 for (i = 0; i < wa->count; ++i)
3379 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
3380 wa->reg[i].addr, wa->reg[i].value, wa->reg[i].mask);
Arun Siluvery888b5992014-08-26 14:44:51 +01003381
3382 return 0;
3383}
3384
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05303385static int i915_ipc_status_show(struct seq_file *m, void *data)
3386{
3387 struct drm_i915_private *dev_priv = m->private;
3388
3389 seq_printf(m, "Isochronous Priority Control: %s\n",
3390 yesno(dev_priv->ipc_enabled));
3391 return 0;
3392}
3393
3394static int i915_ipc_status_open(struct inode *inode, struct file *file)
3395{
3396 struct drm_i915_private *dev_priv = inode->i_private;
3397
3398 if (!HAS_IPC(dev_priv))
3399 return -ENODEV;
3400
3401 return single_open(file, i915_ipc_status_show, dev_priv);
3402}
3403
/*
 * i915_ipc_status_write - enable/disable Isochronous Priority Control
 *
 * Parses a boolean from userspace and applies it.  Watermarks must be
 * recomputed for the new IPC setting, so the BIOS-provided watermarks are
 * marked untrusted (distrust_bios_wm); as the DRM_INFO below notes, the
 * change only fully takes effect on the next atomic commit.
 */
static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
                                     size_t len, loff_t *offp)
{
        struct seq_file *m = file->private_data;
        struct drm_i915_private *dev_priv = m->private;
        int ret;
        bool enable;

        ret = kstrtobool_from_user(ubuf, len, &enable);
        if (ret < 0)
                return ret;

        /* Hold a wakeref while touching hardware-affecting state. */
        intel_runtime_pm_get(dev_priv);
        if (!dev_priv->ipc_enabled && enable)
                DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
        dev_priv->wm.distrust_bios_wm = true;
        dev_priv->ipc_enabled = enable;
        intel_enable_ipc(dev_priv);
        intel_runtime_pm_put(dev_priv);

        return len;
}
3426
/* debugfs fops for i915_ipc_status: seq_file read, boolean write toggle. */
static const struct file_operations i915_ipc_status_fops = {
        .owner = THIS_MODULE,
        .open = i915_ipc_status_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = i915_ipc_status_write
};
3435
/*
 * i915_ddb_info - dump the display data buffer (DDB) allocation
 *
 * Prints, per pipe, the start/end/size of the DDB slice assigned to each
 * universal plane and to the cursor plane.  The skl_hw DDB state only
 * exists on gen9+, hence the -ENODEV check.  Runs under the global
 * modeset locks so the allocation cannot change mid-dump.
 */
static int i915_ddb_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct skl_ddb_allocation *ddb;
        struct skl_ddb_entry *entry;
        enum pipe pipe;
        int plane;

        if (INTEL_GEN(dev_priv) < 9)
                return -ENODEV;

        drm_modeset_lock_all(dev);

        ddb = &dev_priv->wm.skl_hw.ddb;

        /* Column header, aligned with the per-plane rows below. */
        seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

        for_each_pipe(dev_priv, pipe) {
                seq_printf(m, "Pipe %c\n", pipe_name(pipe));

                for_each_universal_plane(dev_priv, pipe, plane) {
                        entry = &ddb->plane[pipe][plane];
                        seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1,
                                   entry->start, entry->end,
                                   skl_ddb_entry_size(entry));
                }

                /* Cursor has its own fixed slot, printed last. */
                entry = &ddb->plane[pipe][PLANE_CURSOR];
                seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
                           entry->end, skl_ddb_entry_size(entry));
        }

        drm_modeset_unlock_all(dev);

        return 0;
}
3473
/*
 * drrs_status_per_crtc - print DRRS (display refresh rate switching) state
 * @m: seq_file receiving the debugfs output
 * @dev: drm device
 * @intel_crtc: CRTC being reported
 *
 * Lists the connectors driven by this CRTC, the DRRS support type read
 * from VBT, and - when the committed CRTC state has DRRS - the current
 * refresh rate state sampled under drrs->mutex.
 */
static void drrs_status_per_crtc(struct seq_file *m,
                                 struct drm_device *dev,
                                 struct intel_crtc *intel_crtc)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_drrs *drrs = &dev_priv->drrs;
        int vrefresh = 0;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;

        /* Name every connector currently bound to this CRTC. */
        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                if (connector->state->crtc != &intel_crtc->base)
                        continue;

                seq_printf(m, "%s:\n", connector->name);
        }
        drm_connector_list_iter_end(&conn_iter);

        if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
                seq_puts(m, "\tVBT: DRRS_type: Static");
        else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
                seq_puts(m, "\tVBT: DRRS_type: Seamless");
        else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
                seq_puts(m, "\tVBT: DRRS_type: None");
        else
                seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

        seq_puts(m, "\n\n");

        if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
                struct intel_panel *panel;

                /* drrs->mutex guards dp/refresh_rate_type/busy bits below. */
                mutex_lock(&drrs->mutex);
                /* DRRS Supported */
                seq_puts(m, "\tDRRS Supported: Yes\n");

                /* disable_drrs() will make drrs->dp NULL */
                if (!drrs->dp) {
                        seq_puts(m, "Idleness DRRS: Disabled\n");
                        if (dev_priv->psr.enabled)
                                seq_puts(m,
                                "\tAs PSR is enabled, DRRS is not enabled\n");
                        mutex_unlock(&drrs->mutex);
                        return;
                }

                panel = &drrs->dp->attached_connector->panel;
                seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
                           drrs->busy_frontbuffer_bits);

                seq_puts(m, "\n\t\t");
                if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
                        seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
                        vrefresh = panel->fixed_mode->vrefresh;
                } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
                        seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
                        vrefresh = panel->downclock_mode->vrefresh;
                } else {
                        seq_printf(m, "DRRS_State: Unknown(%d)\n",
                                   drrs->refresh_rate_type);
                        mutex_unlock(&drrs->mutex);
                        return;
                }
                seq_printf(m, "\t\tVrefresh: %d", vrefresh);

                seq_puts(m, "\n\t\t");
                mutex_unlock(&drrs->mutex);
        } else {
                /* DRRS not supported. Print the VBT parameter*/
                seq_puts(m, "\tDRRS Supported : No");
        }
        seq_puts(m, "\n");
}
3548
3549static int i915_drrs_status(struct seq_file *m, void *unused)
3550{
David Weinehall36cdd012016-08-22 13:59:31 +03003551 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3552 struct drm_device *dev = &dev_priv->drm;
Vandana Kannana54746e2015-03-03 20:53:10 +05303553 struct intel_crtc *intel_crtc;
3554 int active_crtc_cnt = 0;
3555
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003556 drm_modeset_lock_all(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303557 for_each_intel_crtc(dev, intel_crtc) {
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003558 if (intel_crtc->base.state->active) {
Vandana Kannana54746e2015-03-03 20:53:10 +05303559 active_crtc_cnt++;
3560 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
3561
3562 drrs_status_per_crtc(m, dev, intel_crtc);
3563 }
Vandana Kannana54746e2015-03-03 20:53:10 +05303564 }
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003565 drm_modeset_unlock_all(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303566
3567 if (!active_crtc_cnt)
3568 seq_puts(m, "No active crtc found\n");
3569
3570 return 0;
3571}
3572
Dave Airlie11bed952014-05-12 15:22:27 +10003573static int i915_dp_mst_info(struct seq_file *m, void *unused)
3574{
David Weinehall36cdd012016-08-22 13:59:31 +03003575 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3576 struct drm_device *dev = &dev_priv->drm;
Dave Airlie11bed952014-05-12 15:22:27 +10003577 struct intel_encoder *intel_encoder;
3578 struct intel_digital_port *intel_dig_port;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003579 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003580 struct drm_connector_list_iter conn_iter;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003581
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003582 drm_connector_list_iter_begin(dev, &conn_iter);
3583 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003584 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
Dave Airlie11bed952014-05-12 15:22:27 +10003585 continue;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003586
3587 intel_encoder = intel_attached_encoder(connector);
3588 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3589 continue;
3590
3591 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
Dave Airlie11bed952014-05-12 15:22:27 +10003592 if (!intel_dig_port->dp.can_mst)
3593 continue;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003594
Jim Bride40ae80c2016-04-14 10:18:37 -07003595 seq_printf(m, "MST Source Port %c\n",
Ville Syrjälä8f4f2792017-11-09 17:24:34 +02003596 port_name(intel_dig_port->base.port));
Dave Airlie11bed952014-05-12 15:22:27 +10003597 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3598 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003599 drm_connector_list_iter_end(&conn_iter);
3600
Dave Airlie11bed952014-05-12 15:22:27 +10003601 return 0;
3602}
3603
Todd Previteeb3394fa2015-04-18 00:04:19 -07003604static ssize_t i915_displayport_test_active_write(struct file *file,
David Weinehall36cdd012016-08-22 13:59:31 +03003605 const char __user *ubuf,
3606 size_t len, loff_t *offp)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003607{
3608 char *input_buffer;
3609 int status = 0;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003610 struct drm_device *dev;
3611 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003612 struct drm_connector_list_iter conn_iter;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003613 struct intel_dp *intel_dp;
3614 int val = 0;
3615
Sudip Mukherjee9aaffa32015-07-21 17:36:45 +05303616 dev = ((struct seq_file *)file->private_data)->private;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003617
Todd Previteeb3394fa2015-04-18 00:04:19 -07003618 if (len == 0)
3619 return 0;
3620
Geliang Tang261aeba2017-05-06 23:40:17 +08003621 input_buffer = memdup_user_nul(ubuf, len);
3622 if (IS_ERR(input_buffer))
3623 return PTR_ERR(input_buffer);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003624
Todd Previteeb3394fa2015-04-18 00:04:19 -07003625 DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
3626
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003627 drm_connector_list_iter_begin(dev, &conn_iter);
3628 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003629 struct intel_encoder *encoder;
3630
Todd Previteeb3394fa2015-04-18 00:04:19 -07003631 if (connector->connector_type !=
3632 DRM_MODE_CONNECTOR_DisplayPort)
3633 continue;
3634
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003635 encoder = to_intel_encoder(connector->encoder);
3636 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3637 continue;
3638
3639 if (encoder && connector->status == connector_status_connected) {
3640 intel_dp = enc_to_intel_dp(&encoder->base);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003641 status = kstrtoint(input_buffer, 10, &val);
3642 if (status < 0)
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003643 break;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003644 DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3645 /* To prevent erroneous activation of the compliance
3646 * testing code, only accept an actual value of 1 here
3647 */
3648 if (val == 1)
Manasi Navarec1617ab2016-12-09 16:22:50 -08003649 intel_dp->compliance.test_active = 1;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003650 else
Manasi Navarec1617ab2016-12-09 16:22:50 -08003651 intel_dp->compliance.test_active = 0;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003652 }
3653 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003654 drm_connector_list_iter_end(&conn_iter);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003655 kfree(input_buffer);
3656 if (status < 0)
3657 return status;
3658
3659 *offp += len;
3660 return len;
3661}
3662
3663static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3664{
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003665 struct drm_i915_private *dev_priv = m->private;
3666 struct drm_device *dev = &dev_priv->drm;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003667 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003668 struct drm_connector_list_iter conn_iter;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003669 struct intel_dp *intel_dp;
3670
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003671 drm_connector_list_iter_begin(dev, &conn_iter);
3672 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003673 struct intel_encoder *encoder;
3674
Todd Previteeb3394fa2015-04-18 00:04:19 -07003675 if (connector->connector_type !=
3676 DRM_MODE_CONNECTOR_DisplayPort)
3677 continue;
3678
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003679 encoder = to_intel_encoder(connector->encoder);
3680 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3681 continue;
3682
3683 if (encoder && connector->status == connector_status_connected) {
3684 intel_dp = enc_to_intel_dp(&encoder->base);
Manasi Navarec1617ab2016-12-09 16:22:50 -08003685 if (intel_dp->compliance.test_active)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003686 seq_puts(m, "1");
3687 else
3688 seq_puts(m, "0");
3689 } else
3690 seq_puts(m, "0");
3691 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003692 drm_connector_list_iter_end(&conn_iter);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003693
3694 return 0;
3695}
3696
3697static int i915_displayport_test_active_open(struct inode *inode,
David Weinehall36cdd012016-08-22 13:59:31 +03003698 struct file *file)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003699{
David Weinehall36cdd012016-08-22 13:59:31 +03003700 return single_open(file, i915_displayport_test_active_show,
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003701 inode->i_private);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003702}
3703
/* debugfs fops: seq_file read of test-active state, write to arm/disarm. */
static const struct file_operations i915_displayport_test_active_fops = {
        .owner = THIS_MODULE,
        .open = i915_displayport_test_active_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = i915_displayport_test_active_write
};
3712
/*
 * i915_displayport_test_data_show - dump pending DP compliance test data
 *
 * For every connected (non-MST) DisplayPort connector, prints the stored
 * data of the pending compliance test: the raw edid value for EDID-read
 * tests, or hdisplay/vdisplay/bpc for video-pattern tests.  Prints "0"
 * for DP connectors without a usable, connected encoder.
 */
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = m->private;
        struct drm_device *dev = &dev_priv->drm;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct intel_dp *intel_dp;

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                struct intel_encoder *encoder;

                if (connector->connector_type !=
                    DRM_MODE_CONNECTOR_DisplayPort)
                        continue;

                /* MST connector instances are skipped entirely. */
                encoder = to_intel_encoder(connector->encoder);
                if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
                        continue;

                if (encoder && connector->status == connector_status_connected) {
                        intel_dp = enc_to_intel_dp(&encoder->base);
                        if (intel_dp->compliance.test_type ==
                            DP_TEST_LINK_EDID_READ)
                                seq_printf(m, "%lx",
                                           intel_dp->compliance.test_data.edid);
                        else if (intel_dp->compliance.test_type ==
                                 DP_TEST_LINK_VIDEO_PATTERN) {
                                seq_printf(m, "hdisplay: %d\n",
                                           intel_dp->compliance.test_data.hdisplay);
                                seq_printf(m, "vdisplay: %d\n",
                                           intel_dp->compliance.test_data.vdisplay);
                                seq_printf(m, "bpc: %u\n",
                                           intel_dp->compliance.test_data.bpc);
                        }
                } else
                        seq_puts(m, "0");
        }
        drm_connector_list_iter_end(&conn_iter);

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003756
3757static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3758{
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003759 struct drm_i915_private *dev_priv = m->private;
3760 struct drm_device *dev = &dev_priv->drm;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003761 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003762 struct drm_connector_list_iter conn_iter;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003763 struct intel_dp *intel_dp;
3764
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003765 drm_connector_list_iter_begin(dev, &conn_iter);
3766 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003767 struct intel_encoder *encoder;
3768
Todd Previteeb3394fa2015-04-18 00:04:19 -07003769 if (connector->connector_type !=
3770 DRM_MODE_CONNECTOR_DisplayPort)
3771 continue;
3772
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003773 encoder = to_intel_encoder(connector->encoder);
3774 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3775 continue;
3776
3777 if (encoder && connector->status == connector_status_connected) {
3778 intel_dp = enc_to_intel_dp(&encoder->base);
Manasi Navarec1617ab2016-12-09 16:22:50 -08003779 seq_printf(m, "%02lx", intel_dp->compliance.test_type);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003780 } else
3781 seq_puts(m, "0");
3782 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003783 drm_connector_list_iter_end(&conn_iter);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003784
3785 return 0;
3786}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003787DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003788
/*
 * wm_latency_show - print a watermark latency table in microseconds
 * @m: seq_file whose private field is the drm_i915_private
 * @wm: table of up to 8 raw per-level latency values
 *
 * The number of valid levels and the unit of the raw values are
 * platform-dependent; each value is scaled into tenths of a microsecond
 * before printing.  Runs under the global modeset locks.
 */
static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
{
        struct drm_i915_private *dev_priv = m->private;
        struct drm_device *dev = &dev_priv->drm;
        int level;
        int num_levels;

        /* Platform-specific count of valid watermark levels. */
        if (IS_CHERRYVIEW(dev_priv))
                num_levels = 3;
        else if (IS_VALLEYVIEW(dev_priv))
                num_levels = 1;
        else if (IS_G4X(dev_priv))
                num_levels = 3;
        else
                num_levels = ilk_wm_max_level(dev_priv) + 1;

        drm_modeset_lock_all(dev);

        for (level = 0; level < num_levels; level++) {
                unsigned int latency = wm[level];

                /*
                 * - WM1+ latency values in 0.5us units
                 * - latencies are in us on gen9/vlv/chv
                 * Scale everything to 0.1us units for printing below.
                 */
                if (INTEL_GEN(dev_priv) >= 9 ||
                    IS_VALLEYVIEW(dev_priv) ||
                    IS_CHERRYVIEW(dev_priv) ||
                    IS_G4X(dev_priv))
                        latency *= 10;
                else if (level > 0)
                        latency *= 5;

                seq_printf(m, "WM%d %u (%u.%u usec)\n",
                           level, wm[level], latency / 10, latency % 10);
        }

        drm_modeset_unlock_all(dev);
}
3828
3829static int pri_wm_latency_show(struct seq_file *m, void *data)
3830{
David Weinehall36cdd012016-08-22 13:59:31 +03003831 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003832 const uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003833
David Weinehall36cdd012016-08-22 13:59:31 +03003834 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003835 latencies = dev_priv->wm.skl_latency;
3836 else
David Weinehall36cdd012016-08-22 13:59:31 +03003837 latencies = dev_priv->wm.pri_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003838
3839 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003840
3841 return 0;
3842}
3843
3844static int spr_wm_latency_show(struct seq_file *m, void *data)
3845{
David Weinehall36cdd012016-08-22 13:59:31 +03003846 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003847 const uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003848
David Weinehall36cdd012016-08-22 13:59:31 +03003849 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003850 latencies = dev_priv->wm.skl_latency;
3851 else
David Weinehall36cdd012016-08-22 13:59:31 +03003852 latencies = dev_priv->wm.spr_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003853
3854 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003855
3856 return 0;
3857}
3858
3859static int cur_wm_latency_show(struct seq_file *m, void *data)
3860{
David Weinehall36cdd012016-08-22 13:59:31 +03003861 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003862 const uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003863
David Weinehall36cdd012016-08-22 13:59:31 +03003864 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003865 latencies = dev_priv->wm.skl_latency;
3866 else
David Weinehall36cdd012016-08-22 13:59:31 +03003867 latencies = dev_priv->wm.cur_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003868
3869 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003870
3871 return 0;
3872}
3873
3874static int pri_wm_latency_open(struct inode *inode, struct file *file)
3875{
David Weinehall36cdd012016-08-22 13:59:31 +03003876 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003877
Ville Syrjälä04548cb2017-04-21 21:14:29 +03003878 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003879 return -ENODEV;
3880
David Weinehall36cdd012016-08-22 13:59:31 +03003881 return single_open(file, pri_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003882}
3883
3884static int spr_wm_latency_open(struct inode *inode, struct file *file)
3885{
David Weinehall36cdd012016-08-22 13:59:31 +03003886 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003887
David Weinehall36cdd012016-08-22 13:59:31 +03003888 if (HAS_GMCH_DISPLAY(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003889 return -ENODEV;
3890
David Weinehall36cdd012016-08-22 13:59:31 +03003891 return single_open(file, spr_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003892}
3893
3894static int cur_wm_latency_open(struct inode *inode, struct file *file)
3895{
David Weinehall36cdd012016-08-22 13:59:31 +03003896 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003897
David Weinehall36cdd012016-08-22 13:59:31 +03003898 if (HAS_GMCH_DISPLAY(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003899 return -ENODEV;
3900
David Weinehall36cdd012016-08-22 13:59:31 +03003901 return single_open(file, cur_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003902}
3903
/*
 * Common writer for the {pri,spr,cur}_wm_latency debugfs files: parse up
 * to eight space-separated latency values from userspace and copy them
 * into the given watermark latency table under the modeset locks.
 *
 * Returns the number of bytes consumed on success, or a negative errno.
 */
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, uint16_t wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	uint16_t new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	/* The number of watermark levels is platform dependent. */
	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	/* Reject input that would not fit the local buffer plus its NUL. */
	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	/* Exactly one value per watermark level must be supplied. */
	if (ret != num_levels)
		return -EINVAL;

	/* Take all modeset locks while mutating the shared latency table. */
	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
3948
3949
3950static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3951 size_t len, loff_t *offp)
3952{
3953 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003954 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003955 uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003956
David Weinehall36cdd012016-08-22 13:59:31 +03003957 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003958 latencies = dev_priv->wm.skl_latency;
3959 else
David Weinehall36cdd012016-08-22 13:59:31 +03003960 latencies = dev_priv->wm.pri_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003961
3962 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003963}
3964
3965static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3966 size_t len, loff_t *offp)
3967{
3968 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003969 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003970 uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003971
David Weinehall36cdd012016-08-22 13:59:31 +03003972 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003973 latencies = dev_priv->wm.skl_latency;
3974 else
David Weinehall36cdd012016-08-22 13:59:31 +03003975 latencies = dev_priv->wm.spr_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003976
3977 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003978}
3979
3980static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3981 size_t len, loff_t *offp)
3982{
3983 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003984 struct drm_i915_private *dev_priv = m->private;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003985 uint16_t *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003986
David Weinehall36cdd012016-08-22 13:59:31 +03003987 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003988 latencies = dev_priv->wm.skl_latency;
3989 else
David Weinehall36cdd012016-08-22 13:59:31 +03003990 latencies = dev_priv->wm.cur_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003991
3992 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003993}
3994
/* debugfs fops for i915_pri_wm_latency: seq_file read plus latency write. */
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

/* debugfs fops for i915_spr_wm_latency. */
static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

/* debugfs fops for i915_cur_wm_latency. */
static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
4021
Kees Cook647416f2013-03-10 14:10:06 -07004022static int
4023i915_wedged_get(void *data, u64 *val)
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004024{
David Weinehall36cdd012016-08-22 13:59:31 +03004025 struct drm_i915_private *dev_priv = data;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004026
Chris Wilsond98c52c2016-04-13 17:35:05 +01004027 *val = i915_terminally_wedged(&dev_priv->gpu_error);
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004028
Kees Cook647416f2013-03-10 14:10:06 -07004029 return 0;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004030}
4031
/*
 * Writing an engine mask to i915_wedged declares those engines hung:
 * each selected engine's hangcheck state is primed as stalled and
 * i915_handle_error() is invoked, then we wait for the reset handoff.
 */
static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	struct intel_engine_cs *engine;
	unsigned int tmp;

	/*
	 * There is no safeguard against this debugfs entry colliding
	 * with the hangcheck calling same i915_handle_error() in
	 * parallel, causing an explosion. For now we assume that the
	 * test harness is responsible enough not to inject gpu hangs
	 * while it is writing to 'i915_wedged'
	 */

	/* Refuse while a reset is already pending. */
	if (i915_reset_backoff(&i915->gpu_error))
		return -EAGAIN;

	/* Mark each engine in the mask as stalled at its current seqno. */
	for_each_engine_masked(engine, i915, val, tmp) {
		engine->hangcheck.seqno = intel_engine_get_seqno(engine);
		engine->hangcheck.stalled = true;
	}

	i915_handle_error(i915, val, I915_ERROR_CAPTURE,
			  "Manually set wedged engine mask = %llx", val);

	/* Block until the error handler hands the reset over. */
	wait_on_bit(&i915->gpu_error.flags,
		    I915_RESET_HANDOFF,
		    TASK_UNINTERRUPTIBLE);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004068
/*
 * Helper for the missed/test irq debugfs controls: quiesce the GPU under
 * struct_mutex, store the new mask, then flush the idle worker so any
 * previously armed interrupt is disarmed.
 *
 * Returns 0 on success or a negative errno from locking/idling.
 */
static int
fault_irq_set(struct drm_i915_private *i915,
	      unsigned long *irq,
	      unsigned long val)
{
	int err;

	err = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (err)
		return err;

	/* The mask must only change while the GPU is idle. */
	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_LOCKED |
				     I915_WAIT_INTERRUPTIBLE,
				     MAX_SCHEDULE_TIMEOUT);
	if (err)
		goto err_unlock;

	*irq = val;
	mutex_unlock(&i915->drm.struct_mutex);

	/* Flush idle worker to disarm irq */
	drain_delayed_work(&i915->gt.idle_work);

	return 0;

err_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
4099
/* Read back the mask of rings that have missed an interrupt. */
static int
i915_ring_missed_irq_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = dev_priv->gpu_error.missed_irq_rings;
	return 0;
}

/* Update the missed-irq ring mask, quiescing the GPU first. */
static int
i915_ring_missed_irq_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
			"0x%08llx\n");
4120
4121static int
4122i915_ring_test_irq_get(void *data, u64 *val)
4123{
David Weinehall36cdd012016-08-22 13:59:31 +03004124 struct drm_i915_private *dev_priv = data;
Chris Wilson094f9a52013-09-25 17:34:55 +01004125
4126 *val = dev_priv->gpu_error.test_irq_rings;
4127
4128 return 0;
4129}
4130
/*
 * Select which rings have their user interrupt masked for testing.
 * Unsupported when GuC submission owns the interrupt, and on gen11+
 * where per-engine interrupt masking no longer exists.
 */
static int
i915_ring_test_irq_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/* GuC keeps the user interrupt permanently enabled for submission */
	if (USES_GUC_SUBMISSION(i915))
		return -ENODEV;

	/*
	 * From icl, we can no longer individually mask interrupt generation
	 * from each engine.
	 */
	if (INTEL_GEN(i915) >= 11)
		return -ENODEV;

	/* Clamp the request to engines that actually exist. */
	val &= INTEL_INFO(i915)->ring_mask;
	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);

	return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
			i915_ring_test_irq_get, i915_ring_test_irq_set,
			"0x%08llx\n");
4156
/*
 * Bit flags accepted by the i915_gem_drop_caches debugfs file; see
 * i915_drop_caches_set() for how each bit is acted upon.
 */
#define DROP_UNBOUND	BIT(0)	/* shrink unbound (not GTT-mapped) objects */
#define DROP_BOUND	BIT(1)	/* shrink bound objects */
#define DROP_RETIRE	BIT(2)	/* retire completed requests */
#define DROP_ACTIVE	BIT(3)	/* wait for the GPU to idle first */
#define DROP_FREED	BIT(4)	/* drain the deferred-free object list */
#define DROP_SHRINK_ALL	BIT(5)	/* invoke the shrinker on everything */
#define DROP_IDLE	BIT(6)	/* flush retire/idle workers until asleep */
#define DROP_RESET_ACTIVE	BIT(7)	/* wedge/reset if not idle */
#define DROP_RESET_SEQNO	BIT(8)	/* rewind the global seqno to 1 */
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED	| \
		  DROP_SHRINK_ALL |\
		  DROP_IDLE	| \
		  DROP_RESET_ACTIVE | \
		  DROP_RESET_SEQNO)
Kees Cook647416f2013-03-10 14:10:06 -07004175static int
4176i915_drop_caches_get(void *data, u64 *val)
Chris Wilsondd624af2013-01-15 12:39:35 +00004177{
Kees Cook647416f2013-03-10 14:10:06 -07004178 *val = DROP_ALL;
Chris Wilsondd624af2013-01-15 12:39:35 +00004179
Kees Cook647416f2013-03-10 14:10:06 -07004180 return 0;
Chris Wilsondd624af2013-01-15 12:39:35 +00004181}
4182
/*
 * Act on the DROP_* flags written to i915_gem_drop_caches: optionally
 * wedge/reset, wait for idle, rewind the seqno, retire requests, run the
 * shrinker, flush the idle workers and drain freed objects. Used by the
 * test harness to bring the driver into a known state.
 */
static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	int ret = 0;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);
	/* Keep the device awake for the whole operation. */
	intel_runtime_pm_get(i915);

	/* Declare the GPU wedged if asked to reset while it is busy. */
	if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915))
		i915_gem_set_wedged(i915);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
		ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
		if (ret)
			goto out;

		if (val & DROP_ACTIVE)
			ret = i915_gem_wait_for_idle(i915,
						     I915_WAIT_INTERRUPTIBLE |
						     I915_WAIT_LOCKED,
						     MAX_SCHEDULE_TIMEOUT);

		/* Only rewind the seqno once the GPU is known idle. */
		if (ret == 0 && val & DROP_RESET_SEQNO)
			ret = i915_gem_set_global_seqno(&i915->drm, 1);

		if (val & DROP_RETIRE)
			i915_retire_requests(i915);

		mutex_unlock(&i915->drm.struct_mutex);
	}

	/* Trigger a full reset if the earlier wedge took effect. */
	if (val & DROP_RESET_ACTIVE &&
	    i915_terminally_wedged(&i915->gpu_error)) {
		i915_handle_error(i915, ALL_ENGINES, 0, NULL);
		wait_on_bit(&i915->gpu_error.flags,
			    I915_RESET_HANDOFF,
			    TASK_UNINTERRUPTIBLE);
	}

	/* Annotate for lockdep that we may enter the shrinker here. */
	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(i915);
	fs_reclaim_release(GFP_KERNEL);

	/* Flush the background workers until the GT has gone to sleep. */
	if (val & DROP_IDLE) {
		do {
			if (READ_ONCE(i915->gt.active_requests))
				flush_delayed_work(&i915->gt.retire_work);
			drain_delayed_work(&i915->gt.idle_work);
		} while (READ_ONCE(i915->gt.awake));
	}

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(i915);

out:
	intel_runtime_pm_put(i915);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
Chris Wilsondd624af2013-01-15 12:39:35 +00004257
/*
 * Read the current uncore snoop/cache sharing policy from the
 * GEN6_MBCUNIT_SNPCR register. Only meaningful on gen6/gen7.
 */
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	u32 snpcr;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	/* Hold a runtime-pm wakeref around the register access. */
	intel_runtime_pm_get(dev_priv);

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	intel_runtime_pm_put(dev_priv);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}
4277
/*
 * Set the uncore snoop/cache sharing policy (0-3) by updating the
 * GEN6_MBCUNIT_SNPCR register. Only meaningful on gen6/gen7.
 */
static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 snpcr;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	/* Only the 2-bit policy field values are valid. */
	if (val > 3)
		return -EINVAL;

	/* Hold a runtime-pm wakeref around the register update. */
	intel_runtime_pm_get(dev_priv);
	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004306
/*
 * Fill in the live slice/subslice/EU power-gating status for Cherryview
 * by decoding the CHV_POWER_SS*_SIG* registers into @sseu.
 */
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
#define SS_MAX 2
	const int ss_max = SS_MAX;
	u32 sig1[SS_MAX], sig2[SS_MAX];
	int ss;

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		/* CHV has a single slice. */
		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] |= BIT(ss);
		/* Each PG_ENABLE bit gates a pair of EUs. */
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
#undef SS_MAX
}
4339
/*
 * Fill in the live slice/subslice/EU power-gating status for gen10 by
 * decoding the GEN10_*_PGCTL_ACK registers into @sseu.
 */
static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		/*
		 * FIXME: Valid SS Mask respects the spec and read
		 * only valid bits for those registers, excluding reserverd
		 * although this seems wrong because it would leave many
		 * subslices without ACK.
		 */
		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		sseu->subslice_mask[s] = info->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			/* Each ACK bit represents a pair of EUs. */
			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4395
/*
 * Fill in the live slice/subslice/EU power-gating status for gen9 by
 * decoding the GEN9_*_PGCTL_ACK registers into @sseu.
 */
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		/* gen9 big-core has no per-subslice ACK; use the fuse mask. */
		if (IS_GEN9_BC(dev_priv))
			sseu->subslice_mask[s] =
				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask[s] |= BIT(ss);
			}

			/* Each ACK bit represents a pair of EUs. */
			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4451
/*
 * Fill in the live slice/subslice/EU status for Broadwell from the
 * GEN8_GT_SLICE_INFO register plus the static fuse data in device info.
 */
static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
					 struct sseu_dev_info *sseu)
{
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->eu_per_subslice =
			INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			sseu->subslice_mask[s] =
				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
		}
		sseu->eu_total = sseu->eu_per_subslice *
				 sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu =
				INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}
4479
/*
 * Pretty-print an sseu_dev_info to the seq_file. @is_available_info
 * selects the "Available" (static fuse) vs "Enabled" (runtime) labels;
 * the power-gating capability lines are only printed for the former.
 */
static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
				 const struct sseu_dev_info *sseu)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const char *type = is_available_info ? "Available" : "Enabled";
	int s;

	seq_printf(m, "  %s Slice Mask: %04x\n", type,
		   sseu->slice_mask);
	seq_printf(m, "  %s Slice Total: %u\n", type,
		   hweight8(sseu->slice_mask));
	seq_printf(m, "  %s Subslice Total: %u\n", type,
		   sseu_subslice_total(sseu));
	for (s = 0; s < fls(sseu->slice_mask); s++) {
		seq_printf(m, "  %s Slice%i subslices: %u\n", type,
			   s, hweight8(sseu->subslice_mask[s]));
	}
	seq_printf(m, "  %s EU Total: %u\n", type,
		   sseu->eu_total);
	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
		   sseu->eu_per_subslice);

	/* Capability details only apply to the static device info. */
	if (!is_available_info)
		return;

	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
	if (HAS_POOLED_EU(dev_priv))
		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);

	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(sseu->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(sseu->has_eu_pg));
}
4516
/*
 * debugfs show function for i915_sseu_status: print both the static
 * (fused) SSEU configuration and the runtime power-gated status read
 * from the hardware via the per-platform helpers above. Gen8+ only.
 */
static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct sseu_dev_info sseu;

	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));
	/* Seed the limits from the static device info. */
	sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices;
	sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices;
	sseu.max_eus_per_subslice =
		INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice;

	/* The status helpers read hardware registers; stay awake. */
	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		cherryview_sseu_device_status(dev_priv, &sseu);
	} else if (IS_BROADWELL(dev_priv)) {
		broadwell_sseu_device_status(dev_priv, &sseu);
	} else if (IS_GEN9(dev_priv)) {
		gen9_sseu_device_status(dev_priv, &sseu);
	} else if (INTEL_GEN(dev_priv) >= 10) {
		gen10_sseu_device_status(dev_priv, &sseu);
	}

	intel_runtime_pm_put(dev_priv);

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}
4553
Ben Widawsky6d794d42011-04-25 11:25:56 -07004554static int i915_forcewake_open(struct inode *inode, struct file *file)
4555{
Chris Wilsond7a133d2017-09-07 14:44:41 +01004556 struct drm_i915_private *i915 = inode->i_private;
Ben Widawsky6d794d42011-04-25 11:25:56 -07004557
Chris Wilsond7a133d2017-09-07 14:44:41 +01004558 if (INTEL_GEN(i915) < 6)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004559 return 0;
4560
Chris Wilsond7a133d2017-09-07 14:44:41 +01004561 intel_runtime_pm_get(i915);
4562 intel_uncore_forcewake_user_get(i915);
Ben Widawsky6d794d42011-04-25 11:25:56 -07004563
4564 return 0;
4565}
4566
Ben Widawskyc43b5632012-04-16 14:07:40 -07004567static int i915_forcewake_release(struct inode *inode, struct file *file)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004568{
Chris Wilsond7a133d2017-09-07 14:44:41 +01004569 struct drm_i915_private *i915 = inode->i_private;
Ben Widawsky6d794d42011-04-25 11:25:56 -07004570
Chris Wilsond7a133d2017-09-07 14:44:41 +01004571 if (INTEL_GEN(i915) < 6)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004572 return 0;
4573
Chris Wilsond7a133d2017-09-07 14:44:41 +01004574 intel_uncore_forcewake_user_put(i915);
4575 intel_runtime_pm_put(i915);
Ben Widawsky6d794d42011-04-25 11:25:56 -07004576
4577 return 0;
4578}
4579
/* Holding this debugfs file open pins forcewake (see open/release above). */
static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
4585
Lyude317eaa92017-02-03 21:18:25 -05004586static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4587{
4588 struct drm_i915_private *dev_priv = m->private;
4589 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4590
4591 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4592 seq_printf(m, "Detected: %s\n",
4593 yesno(delayed_work_pending(&hotplug->reenable_work)));
4594
4595 return 0;
4596}
4597
4598static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4599 const char __user *ubuf, size_t len,
4600 loff_t *offp)
4601{
4602 struct seq_file *m = file->private_data;
4603 struct drm_i915_private *dev_priv = m->private;
4604 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4605 unsigned int new_threshold;
4606 int i;
4607 char *newline;
4608 char tmp[16];
4609
4610 if (len >= sizeof(tmp))
4611 return -EINVAL;
4612
4613 if (copy_from_user(tmp, ubuf, len))
4614 return -EFAULT;
4615
4616 tmp[len] = '\0';
4617
4618 /* Strip newline, if any */
4619 newline = strchr(tmp, '\n');
4620 if (newline)
4621 *newline = '\0';
4622
4623 if (strcmp(tmp, "reset") == 0)
4624 new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4625 else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4626 return -EINVAL;
4627
4628 if (new_threshold > 0)
4629 DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4630 new_threshold);
4631 else
4632 DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4633
4634 spin_lock_irq(&dev_priv->irq_lock);
4635 hotplug->hpd_storm_threshold = new_threshold;
4636 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4637 for_each_hpd_pin(i)
4638 hotplug->stats[i].count = 0;
4639 spin_unlock_irq(&dev_priv->irq_lock);
4640
4641 /* Re-enable hpd immediately if we were in an irq storm */
4642 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4643
4644 return len;
4645}
4646
/* seq_file boilerplate: bind the show routine to our i915 device. */
static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}
4651
/* Read: current threshold/state; write: new threshold or "reset". */
static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
4660
/*
 * Manually enable (val != 0) or disable (val == 0) DRRS on every active
 * crtc that supports it, by walking each crtc's connectors and poking the
 * attached eDP sink.  Gen7+ only.
 *
 * Each crtc's modeset mutex is taken individually, and any pending commit
 * is waited for before touching the state, so we never race a concurrent
 * atomic update on that crtc.
 */
static int i915_drrs_ctl_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 7)
		return -ENODEV;

	for_each_intel_crtc(dev, crtc) {
		struct drm_connector_list_iter conn_iter;
		struct intel_crtc_state *crtc_state;
		struct drm_connector *connector;
		struct drm_crtc_commit *commit;
		int ret;

		/* Serialize against modesets on this crtc; interruptible. */
		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		/* Skip crtcs that are off or have no DRRS capability. */
		if (!crtc_state->base.active ||
		    !crtc_state->has_drrs)
			goto out;

		/* Wait for any in-flight commit to finish hw programming. */
		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				goto out;
		}

		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct intel_encoder *encoder;
			struct intel_dp *intel_dp;

			/* Only connectors driven by this crtc are relevant. */
			if (!(crtc_state->base.connector_mask &
			      drm_connector_mask(connector)))
				continue;

			encoder = intel_attached_encoder(connector);
			/* DRRS is an eDP feature. */
			if (encoder->type != INTEL_OUTPUT_EDP)
				continue;

			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
						val ? "en" : "dis", val);

			intel_dp = enc_to_intel_dp(&encoder->base);
			if (val)
				intel_edp_drrs_enable(intel_dp,
						      crtc_state);
			else
				intel_edp_drrs_disable(intel_dp,
						       crtc_state);
		}
		drm_connector_list_iter_end(&conn_iter);

out:
		/* Always drop this crtc's lock before moving on (or failing). */
		drm_modeset_unlock(&crtc->base.mutex);
		if (ret)
			return ret;
	}

	return 0;
}

/* Write-only attribute: "echo 0/1 > i915_drrs_ctl". */
DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4730
/*
 * Writing a truthy value re-arms FIFO underrun reporting on every active
 * pipe (underrun interrupts are disabled after the first hit to avoid an
 * interrupt storm), then resets FBC's underrun tracking.
 *
 * For each crtc we take its modeset mutex and wait for any pending commit
 * to fully complete (hw_done and flip_done) so the state we re-arm against
 * is the one actually programmed into the hardware.
 */
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	/* A false value is a no-op; report the whole write as consumed. */
	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		commit = crtc_state->base.commit;
		if (commit) {
			/* Wait for both hw programming and the page flip. */
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->base.active) {
			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
				      pipe_name(intel_crtc->pipe));

			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}

		/* Drop the lock before propagating any wait error. */
		drm_modeset_unlock(&intel_crtc->base.mutex);

		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}
4784
/* Write-only knob: any truthy write re-arms FIFO underrun reporting. */
static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};
4791
/*
 * Read-only informational debugfs files, registered en masse via
 * drm_debugfs_create_files().  The optional fourth member is passed to the
 * show callback as its data argument (see i915_guc_load_err_log_dump).
 */
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_reset_info", i915_reset_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
Ben Gamari20172632009-02-17 20:08:50 -05004842
/*
 * Read/write debugfs files with bespoke file_operations, created one by
 * one in i915_debugfs_register().  The error-capture entries are only
 * built when CONFIG_DRM_I915_CAPTURE_ERROR is enabled.
 */
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};
4872
Chris Wilson1dac8912016-06-24 14:00:17 +01004873int i915_debugfs_register(struct drm_i915_private *dev_priv)
Ben Gamari20172632009-02-17 20:08:50 -05004874{
Chris Wilson91c8a322016-07-05 10:40:23 +01004875 struct drm_minor *minor = dev_priv->drm.primary;
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004876 struct dentry *ent;
Maarten Lankhorst6cc42152018-06-28 09:23:02 +02004877 int i;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004878
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004879 ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4880 minor->debugfs_root, to_i915(minor->dev),
4881 &i915_forcewake_fops);
4882 if (!ent)
4883 return -ENOMEM;
Daniel Vetter6a9c3082011-12-14 13:57:11 +01004884
Daniel Vetter34b96742013-07-04 20:49:44 +02004885 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004886 ent = debugfs_create_file(i915_debugfs_files[i].name,
4887 S_IRUGO | S_IWUSR,
4888 minor->debugfs_root,
4889 to_i915(minor->dev),
Daniel Vetter34b96742013-07-04 20:49:44 +02004890 i915_debugfs_files[i].fops);
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004891 if (!ent)
4892 return -ENOMEM;
Daniel Vetter34b96742013-07-04 20:49:44 +02004893 }
Mika Kuoppala40633212012-12-04 15:12:00 +02004894
Ben Gamari27c202a2009-07-01 22:26:52 -04004895 return drm_debugfs_create_files(i915_debugfs_list,
4896 I915_DEBUGFS_ENTRIES,
Ben Gamari20172632009-02-17 20:08:50 -05004897 minor->debugfs_root, minor);
4898}
4899
/* Describes one contiguous DPCD register range to dump in i915_dpcd_show. */
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};

/* The DPCD ranges shown for DP/eDP connectors; each must fit in 16 bytes. */
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
4923
4924static int i915_dpcd_show(struct seq_file *m, void *data)
4925{
4926 struct drm_connector *connector = m->private;
4927 struct intel_dp *intel_dp =
4928 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4929 uint8_t buf[16];
4930 ssize_t err;
4931 int i;
4932
Mika Kuoppala5c1a8872015-05-15 13:09:21 +03004933 if (connector->status != connector_status_connected)
4934 return -ENODEV;
4935
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004936 for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4937 const struct dpcd_block *b = &i915_dpcd_debug[i];
4938 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4939
4940 if (b->edp &&
4941 connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4942 continue;
4943
4944 /* low tech for now */
4945 if (WARN_ON(size > sizeof(buf)))
4946 continue;
4947
4948 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
Chris Wilson65404c82018-10-10 09:17:06 +01004949 if (err < 0)
4950 seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
4951 else
4952 seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
kbuild test robotb3f9d7d2015-04-16 18:34:06 +08004953 }
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004954
4955 return 0;
4956}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02004957DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004958
David Weinehallecbd6782016-08-23 12:23:56 +03004959static int i915_panel_show(struct seq_file *m, void *data)
4960{
4961 struct drm_connector *connector = m->private;
4962 struct intel_dp *intel_dp =
4963 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4964
4965 if (connector->status != connector_status_connected)
4966 return -ENODEV;
4967
4968 seq_printf(m, "Panel power up delay: %d\n",
4969 intel_dp->panel_power_up_delay);
4970 seq_printf(m, "Panel power down delay: %d\n",
4971 intel_dp->panel_power_down_delay);
4972 seq_printf(m, "Backlight on delay: %d\n",
4973 intel_dp->backlight_on_delay);
4974 seq_printf(m, "Backlight off delay: %d\n",
4975 intel_dp->backlight_off_delay);
4976
4977 return 0;
4978}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02004979DEFINE_SHOW_ATTRIBUTE(i915_panel);
David Weinehallecbd6782016-08-23 12:23:56 +03004980
/*
 * Report whether the connected sink is HDCP capable ("HDCP1.4" or "None").
 * Fails with -ENODEV if nothing is connected, or -EINVAL if this connector
 * has no HDCP support in the driver.
 */
static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_connector *intel_connector = to_intel_connector(connector);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	/* No shim registered means the connector has no HDCP support */
	if (!intel_connector->hdcp.shim)
		return -EINVAL;

	seq_printf(m, "%s:%d HDCP version: ", connector->name,
		   connector->base.id);
	seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
		   "None" : "HDCP1.4");
	seq_puts(m, "\n");

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
5002
Jani Nikulaaa7471d2015-04-01 11:15:21 +03005003/**
5004 * i915_debugfs_connector_add - add i915 specific connector debugfs files
5005 * @connector: pointer to a registered drm_connector
5006 *
5007 * Cleanup will be done by drm_connector_unregister() through a call to
5008 * drm_debugfs_connector_remove().
5009 *
5010 * Returns 0 on success, negative error codes on error.
5011 */
5012int i915_debugfs_connector_add(struct drm_connector *connector)
5013{
5014 struct dentry *root = connector->debugfs_entry;
5015
5016 /* The connector must have been registered beforehands. */
5017 if (!root)
5018 return -ENODEV;
5019
5020 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5021 connector->connector_type == DRM_MODE_CONNECTOR_eDP)
David Weinehallecbd6782016-08-23 12:23:56 +03005022 debugfs_create_file("i915_dpcd", S_IRUGO, root,
5023 connector, &i915_dpcd_fops);
5024
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07005025 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
David Weinehallecbd6782016-08-23 12:23:56 +03005026 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
5027 connector, &i915_panel_fops);
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07005028 debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
5029 connector, &i915_psr_sink_status_fops);
5030 }
Jani Nikulaaa7471d2015-04-01 11:15:21 +03005031
Ramalingam Cbdc93fe2018-10-23 14:52:29 +05305032 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5033 connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
5034 connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
5035 debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
5036 connector, &i915_hdcp_sink_capability_fops);
5037 }
5038
Jani Nikulaaa7471d2015-04-01 11:15:21 +03005039 return 0;
5040}