blob: b070e6f3780cd7ff331e9b52ac06bd128cf69d6e [file] [log] [blame]
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */
28
#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/sort.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_reset.h"

#include "i915_debugfs.h"
#include "i915_irq.h"
#include "intel_csr.h"
#include "intel_dp.h"
#include "intel_drv.h"
#include "intel_fbc.h"
#include "intel_guc_submission.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_sideband.h"
David Weinehall36cdd012016-08-22 13:59:31 +030051static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
52{
53 return to_i915(node->minor->dev);
54}
55
/*
 * i915_capabilities - debugfs show() dumping device capabilities.
 *
 * Prints the hardware generation, platform name and PCH type, then the
 * static device-info flags, the probed runtime info and the driver caps,
 * and finally the current module parameters.  The param lock is taken so
 * a concurrent parameter write cannot tear the dump.
 *
 * Always returns 0; output goes to @m.
 */
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

	intel_device_info_dump_flags(info, &p);
	intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
	intel_driver_caps_print(&dev_priv->caps, &p);

	/* Hold the param lock while dumping so values are self-consistent. */
	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915_modparams, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}
Ben Gamari433e12f2009-02-17 20:08:51 -050076
/* '*' when the object is still busy on the GPU, ' ' otherwise. */
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_is_active(obj))
		return '*';

	return ' ';
}
81
Imre Deaka7363de2016-05-12 16:18:52 +030082static char get_pin_flag(struct drm_i915_gem_object *obj)
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010083{
Chris Wilsonbd3d2252017-10-13 21:26:14 +010084 return obj->pin_global ? 'p' : ' ';
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010085}
86
Imre Deaka7363de2016-05-12 16:18:52 +030087static char get_tiling_flag(struct drm_i915_gem_object *obj)
Chris Wilsona6172a82009-02-11 14:26:38 +000088{
Chris Wilson3e510a82016-08-05 10:14:23 +010089 switch (i915_gem_object_get_tiling(obj)) {
Akshay Joshi0206e352011-08-16 15:34:10 -040090 default:
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010091 case I915_TILING_NONE: return ' ';
92 case I915_TILING_X: return 'X';
93 case I915_TILING_Y: return 'Y';
Akshay Joshi0206e352011-08-16 15:34:10 -040094 }
Chris Wilsona6172a82009-02-11 14:26:38 +000095}
96
Imre Deaka7363de2016-05-12 16:18:52 +030097static char get_global_flag(struct drm_i915_gem_object *obj)
Ben Widawsky1d693bc2013-07-31 17:00:00 -070098{
Chris Wilsona65adaf2017-10-09 09:43:57 +010099 return obj->userfault_count ? 'g' : ' ';
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +0100100}
101
Imre Deaka7363de2016-05-12 16:18:52 +0300102static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +0100103{
Chris Wilsona4f5ea62016-10-28 13:58:35 +0100104 return obj->mm.mapping ? 'M' : ' ';
Ben Widawsky1d693bc2013-07-31 17:00:00 -0700105}
106
Tvrtko Ursulinca1543b2015-07-01 11:51:10 +0100107static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
108{
109 u64 size = 0;
110 struct i915_vma *vma;
111
Chris Wilsone2189dd2017-12-07 21:14:07 +0000112 for_each_ggtt_vma(vma, obj) {
113 if (drm_mm_node_allocated(&vma->node))
Tvrtko Ursulinca1543b2015-07-01 11:51:10 +0100114 size += vma->node.size;
115 }
116
117 return size;
118}
119
Matthew Auld7393b7e2017-10-06 23:18:28 +0100120static const char *
121stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
122{
123 size_t x = 0;
124
125 switch (page_sizes) {
126 case 0:
127 return "";
128 case I915_GTT_PAGE_SIZE_4K:
129 return "4K";
130 case I915_GTT_PAGE_SIZE_64K:
131 return "64K";
132 case I915_GTT_PAGE_SIZE_2M:
133 return "2M";
134 default:
135 if (!buf)
136 return "M";
137
138 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
139 x += snprintf(buf + x, len - x, "2M, ");
140 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
141 x += snprintf(buf + x, len - x, "64K, ");
142 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
143 x += snprintf(buf + x, len - x, "4K, ");
144 buf[x-2] = '\0';
145
146 return buf;
147 }
148}
149
/*
 * describe_obj - print a one-line summary of a GEM object into @m.
 *
 * Emits the object's status flags, size, read/write domains and cache
 * level, then a "(...)" entry for every vma with allocated GTT space
 * (offset, size, page sizes, GGTT view details, fence), followed by the
 * stolen-memory offset, last-write engine and frontbuffer bits where
 * applicable.  No trailing newline is printed; callers add their own.
 *
 * Caller must hold struct_mutex (asserted below) so the vma list is
 * stable while we walk it.
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* %pK hashes the pointer unless the reader is privileged. */
	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	/* First pass: just count pinned vmas for the summary. */
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_global)
		seq_printf(m, " (global)");
	/* Second pass: describe each vma that has GTT space allocated. */
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			/* GGTT vmas may be alternate views of the object. */
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			case I915_GGTT_VIEW_REMAPPED:
				seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.remapped.plane[0].width,
					   vma->ggtt_view.remapped.plane[0].height,
					   vma->ggtt_view.remapped.plane[0].stride,
					   vma->ggtt_view.remapped.plane[0].offset,
					   vma->ggtt_view.remapped.plane[1].width,
					   vma->ggtt_view.remapped.plane[1].height,
					   vma->ggtt_view.remapped.plane[1].stride,
					   vma->ggtt_view.remapped.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_active_request_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
249
Chris Wilsone637d2c2017-03-16 13:19:57 +0000250static int obj_rank_by_stolen(const void *A, const void *B)
Chris Wilson6d2b88852013-08-07 18:30:54 +0100251{
Chris Wilsone637d2c2017-03-16 13:19:57 +0000252 const struct drm_i915_gem_object *a =
253 *(const struct drm_i915_gem_object **)A;
254 const struct drm_i915_gem_object *b =
255 *(const struct drm_i915_gem_object **)B;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100256
Rasmus Villemoes2d05fa12015-09-28 23:08:50 +0200257 if (a->stolen->start < b->stolen->start)
258 return -1;
259 if (a->stolen->start > b->stolen->start)
260 return 1;
261 return 0;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100262}
263
/*
 * i915_gem_stolen_list_info - debugfs show() listing all objects backed by
 * stolen memory, sorted by their offset within the stolen region.
 *
 * Snapshots candidate objects from the bound/unbound lists under the
 * obj_lock spinlock into a kvmalloc'ed array (sized by the shrinker
 * count, so the walk is bounded even if the lists grow concurrently),
 * then sorts and describes them.  struct_mutex is held across the
 * describe_obj() calls, as that function requires.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error from
 * an interrupted mutex wait.
 */
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	unsigned long flags;
	int ret;

	/*
	 * The shrink count bounds how many shrinkable objects exist; any
	 * objects appearing after this read are simply not reported.
	 */
	total = READ_ONCE(dev_priv->mm.shrink_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;

	spin_lock_irqsave(&dev_priv->mm.obj_lock, flags);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
	}
	/* Unbound objects have no GGTT footprint to add. */
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}
	spin_unlock_irqrestore(&dev_priv->mm.obj_lock, flags);

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, "   ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}
327
/*
 * Aggregate object statistics for one client (or for kernel-owned
 * objects), accumulated by per_file_stats().  All sizes in bytes.
 */
struct file_stats {
	struct i915_address_space *vm;	/* ppGTT filter for non-GGTT vmas */
	unsigned long count;		/* objects visited */
	u64 total, unbound;		/* total size; size with no GTT binding */
	u64 global, shared;		/* GGTT-bound size; named/dma-buf exported size */
	u64 active, inactive;		/* bound size, split by GPU activity */
	u64 closed;			/* size of vmas already closed */
};
336
337static int per_file_stats(int id, void *ptr, void *data)
338{
339 struct drm_i915_gem_object *obj = ptr;
340 struct file_stats *stats = data;
Chris Wilson6313c202014-03-19 13:45:45 +0000341 struct i915_vma *vma;
Chris Wilson2db8e9d2013-06-04 23:49:08 +0100342
Chris Wilson0caf81b2017-06-17 12:57:44 +0100343 lockdep_assert_held(&obj->base.dev->struct_mutex);
344
Chris Wilson2db8e9d2013-06-04 23:49:08 +0100345 stats->count++;
346 stats->total += obj->base.size;
Chris Wilson15717de2016-08-04 07:52:26 +0100347 if (!obj->bind_count)
348 stats->unbound += obj->base.size;
Chris Wilsonc67a17e2014-03-19 13:45:46 +0000349 if (obj->base.name || obj->base.dma_buf)
350 stats->shared += obj->base.size;
351
Chris Wilson528cbd12019-01-28 10:23:54 +0000352 list_for_each_entry(vma, &obj->vma.list, obj_link) {
Chris Wilson894eeec2016-08-04 07:52:20 +0100353 if (!drm_mm_node_allocated(&vma->node))
354 continue;
Chris Wilson6313c202014-03-19 13:45:45 +0000355
Chris Wilson3272db52016-08-04 16:32:32 +0100356 if (i915_vma_is_ggtt(vma)) {
Chris Wilson894eeec2016-08-04 07:52:20 +0100357 stats->global += vma->node.size;
358 } else {
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000359 if (vma->vm != stats->vm)
Chris Wilson6313c202014-03-19 13:45:45 +0000360 continue;
Chris Wilson6313c202014-03-19 13:45:45 +0000361 }
Chris Wilson894eeec2016-08-04 07:52:20 +0100362
Chris Wilsonb0decaf2016-08-04 07:52:44 +0100363 if (i915_vma_is_active(vma))
Chris Wilson894eeec2016-08-04 07:52:20 +0100364 stats->active += vma->node.size;
365 else
366 stats->inactive += vma->node.size;
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000367
368 if (i915_vma_is_closed(vma))
369 stats->closed += vma->node.size;
Chris Wilson2db8e9d2013-06-04 23:49:08 +0100370 }
371
372 return 0;
373}
374
/*
 * Print a single summary line for @stats under the label @name;
 * prints nothing when stats.count is zero.  Note @stats is accessed
 * by value (field access, not pointer), so callers pass the struct
 * itself.
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound, \
			   stats.closed); \
} while (0)
Brad Volkin493018d2014-12-11 12:13:08 -0800388
/*
 * Accumulate and print statistics for every object held in the per-engine
 * batch-buffer pools (kernel-owned, hence the "[k]" label).  Caller holds
 * struct_mutex, which per_file_stats() asserts.
 */
static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	struct file_stats stats = {};
	enum intel_engine_id id;
	int j;

	for_each_engine(engine, dev_priv, id) {
		/* Each engine keeps several size-bucketed cache lists. */
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}
409
/*
 * Print per-context object statistics: one "[k]contexts" line for
 * kernel-side context state (ring buffers and HW context images), plus
 * one line per client-owned context, labelled with the owning task name.
 *
 * Caller holds struct_mutex (required by per_file_stats()).  Per-file
 * object tables are walked under each file's table_lock, and the task
 * name is fetched under RCU since the pid->task mapping may change.
 */
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *i915)
{
	struct file_stats kstats = {};
	struct i915_gem_context *ctx;

	list_for_each_entry(ctx, &i915->contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		/* Kernel-owned backing objects for each engine's context. */
		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			if (ce->state)
				per_file_stats(0, ce->state->obj, &kstats);
			if (ce->ring)
				per_file_stats(0, ce->ring->vma->obj, &kstats);
		}
		i915_gem_context_unlock_engines(ctx);

		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
			/* Filter vmas to this context's address space. */
			struct file_stats stats = { .vm = ctx->vm, };
			struct drm_file *file = ctx->file_priv->file;
			struct task_struct *task;
			char name[80];

			spin_lock(&file->table_lock);
			idr_for_each(&file->object_idr, per_file_stats, &stats);
			spin_unlock(&file->table_lock);

			rcu_read_lock();
			/* Prefer the context's pid; fall back to the file's. */
			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
			snprintf(name, sizeof(name), "%s",
				 task ? task->comm : "<unknown>");
			rcu_read_unlock();

			print_file_stats(m, name, stats);
		}
	}

	print_file_stats(m, "[k]contexts", kstats);
}
451
/*
 * i915_gem_object_info - debugfs show() summarising GEM object usage.
 *
 * Walks the unbound and bound object lists under the obj_lock spinlock,
 * tallying counts and sizes for unbound, bound, purgeable, mapped,
 * huge-paged and display-pinned objects, then prints the GGTT totals and
 * supported page sizes.  Finally, under struct_mutex, appends the
 * batch-pool and per-context statistics.
 *
 * Returns 0 on success or the error from an interrupted mutex wait.
 */
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
	struct drm_i915_gem_object *obj;
	unsigned int page_sizes = 0;
	unsigned long flags;
	char buf[80];	/* scratch for stringify_page_sizes() */
	int ret;

	seq_printf(m, "%u shrinkable objects, %llu bytes\n",
		   dev_priv->mm.shrink_count,
		   dev_priv->mm.shrink_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	huge_size = huge_count = 0;

	spin_lock_irqsave(&dev_priv->mm.obj_lock, flags);
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		/* Anything above the minimum GTT page size counts as huge. */
		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	/* Reuse size/count for the bound list; purgeable/mapped keep accruing. */
	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->pin_global) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	spin_unlock_irqrestore(&dev_priv->mm.obj_lock, flags);

	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
		   huge_count,
		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
		   huge_size);
	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%pa] gtt total\n",
		   ggtt->vm.total, &ggtt->mappable_end);
	seq_printf(m, "Supported page sizes: %s\n",
		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
					buf, sizeof(buf)));

	seq_putc(m, '\n');

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	print_batch_pool_stats(m, dev_priv);
	print_context_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
556
/*
 * i915_gem_batch_pool_info - debugfs show() describing every object in
 * the per-engine batch-buffer pools, one section per engine cache list,
 * followed by a grand total.
 *
 * struct_mutex is held across the walk, as describe_obj() requires.
 * Returns 0 on success or the error from an interrupted mutex wait.
 */
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			/* First pass: count so the header precedes the list. */
			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			/* Second pass: describe each pooled object. */
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, "   ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
601
/*
 * Dump the gen8+ display-engine interrupt registers: per-pipe IMR/IIR/IER
 * (skipping pipes whose power well is down, since reading their registers
 * would be invalid), then the port, misc and PCU interrupt registers.
 *
 * Each pipe's power domain is taken with get_if_enabled() so we never
 * power a well up just to dump it.  Caller is expected to hold a runtime
 * PM wakeref for the device (the i915_interrupt_info entry point does).
 */
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		wakeref = intel_display_power_get_if_enabled(dev_priv,
							     power_domain);
		if (!wakeref) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain, wakeref);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}
653
/*
 * i915_interrupt_info - debugfs dump of the interrupt registers.
 *
 * Picks the register layout by platform (Cherryview, gen11+, gen8+,
 * Valleyview, pre-PCH-split, or PCH-split Ironlake-style) and prints the
 * master, display, GT and PCU interrupt mask/identity/enable registers,
 * followed by the per-engine interrupt masks on gen6+.  All reads happen
 * under a runtime PM wakeref; per-pipe registers are only read when the
 * pipe's power well can be acquired.
 */
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int i, pipe;

	/* Keep the device awake for the duration of the register reads. */
	wakeref = intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t pref;

		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			/* PIPESTAT is only readable while the pipe's
			 * power well is up; otherwise note it and skip. */
			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain, pref);
		}

		/* Hotplug/flip registers are read under the INIT domain. */
		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "Master Interrupt Control: %08x\n",
			   I915_READ(GEN11_GFX_MSTR_IRQ));

		seq_printf(m, "Render/Copy Intr Enable: %08x\n",
			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
		seq_printf(m, "VCS/VECS Intr Enable: %08x\n",
			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
		seq_printf(m, "GUC/SG Intr Enable:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
		seq_printf(m, "Crypto Intr Enable:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
		seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));

		seq_printf(m, "Display Interrupt Control:\t%08x\n",
			   I915_READ(GEN11_DISPLAY_INT_CTL));

		/* Display-side registers share the gen8 layout. */
		gen8_display_interrupt_info(m);
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		gen8_display_interrupt_info(m);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;
			intel_wakeref_t pref;

			/* As above: only touch PIPESTAT with the pipe
			 * power well held. */
			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain, pref);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		/* gen2-4 style: a single IER/IIR/IMR set plus PIPESTAT. */
		seq_printf(m, "Interrupt enable: %08x\n",
			   I915_READ(GEN2_IER));
		seq_printf(m, "Interrupt identity: %08x\n",
			   I915_READ(GEN2_IIR));
		seq_printf(m, "Interrupt mask: %08x\n",
			   I915_READ(GEN2_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat: %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		/* PCH-split: separate north/south display and GT sets. */
		seq_printf(m, "North Display Interrupt enable: %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask: %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable: %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask: %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable: %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity: %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask: %08x\n",
			   I915_READ(GTIMR));
	}

	/* Engine interrupt masks: dedicated per-class registers on gen11+,
	 * otherwise one RING_IMR read per engine on gen6+. */
	if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "RCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
		seq_printf(m, "BCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_MASK));
		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));

	} else if (INTEL_GEN(dev_priv) >= 6) {
		for_each_engine(engine, dev_priv, id) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s): %08x\n",
				   engine->name, ENGINE_READ(engine, RING_IMR));
		}
	}

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
872
Chris Wilsona6172a82009-02-11 14:26:38 +0000873static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
874{
David Weinehall36cdd012016-08-22 13:59:31 +0300875 struct drm_i915_private *dev_priv = node_to_i915(m->private);
876 struct drm_device *dev = &dev_priv->drm;
Chris Wilsonde227ef2010-07-03 07:58:38 +0100877 int i, ret;
878
879 ret = mutex_lock_interruptible(&dev->struct_mutex);
880 if (ret)
881 return ret;
Chris Wilsona6172a82009-02-11 14:26:38 +0000882
Chris Wilsona6172a82009-02-11 14:26:38 +0000883 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
884 for (i = 0; i < dev_priv->num_fence_regs; i++) {
Chris Wilson49ef5292016-08-18 17:17:00 +0100885 struct i915_vma *vma = dev_priv->fence_regs[i].vma;
Chris Wilsona6172a82009-02-11 14:26:38 +0000886
Chris Wilson6c085a72012-08-20 11:40:46 +0200887 seq_printf(m, "Fence %d, pin count = %d, object = ",
888 i, dev_priv->fence_regs[i].pin_count);
Chris Wilson49ef5292016-08-18 17:17:00 +0100889 if (!vma)
Damien Lespiau267f0c92013-06-24 22:59:48 +0100890 seq_puts(m, "unused");
Chris Wilsonc2c347a92010-10-27 15:11:53 +0100891 else
Chris Wilson49ef5292016-08-18 17:17:00 +0100892 describe_obj(m, vma->obj);
Damien Lespiau267f0c92013-06-24 22:59:48 +0100893 seq_putc(m, '\n');
Chris Wilsona6172a82009-02-11 14:26:38 +0000894 }
895
Chris Wilson05394f32010-11-08 19:18:58 +0000896 mutex_unlock(&dev->struct_mutex);
Chris Wilsona6172a82009-02-11 14:26:38 +0000897 return 0;
898}
899
Chris Wilson98a2f412016-10-12 10:05:18 +0100900#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000901static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
902 size_t count, loff_t *pos)
903{
Chris Wilson0e390372018-11-23 13:23:25 +0000904 struct i915_gpu_state *error;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000905 ssize_t ret;
Chris Wilson0e390372018-11-23 13:23:25 +0000906 void *buf;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000907
Chris Wilson0e390372018-11-23 13:23:25 +0000908 error = file->private_data;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000909 if (!error)
910 return 0;
911
Chris Wilson0e390372018-11-23 13:23:25 +0000912 /* Bounce buffer required because of kernfs __user API convenience. */
913 buf = kmalloc(count, GFP_KERNEL);
914 if (!buf)
915 return -ENOMEM;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000916
Chris Wilson0e390372018-11-23 13:23:25 +0000917 ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
918 if (ret <= 0)
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000919 goto out;
920
Chris Wilson0e390372018-11-23 13:23:25 +0000921 if (!copy_to_user(ubuf, buf, ret))
922 *pos += ret;
923 else
924 ret = -EFAULT;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000925
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000926out:
Chris Wilson0e390372018-11-23 13:23:25 +0000927 kfree(buf);
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000928 return ret;
929}
930
/* debugfs release(): drop this file's reference on the GPU error state. */
static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_state_put(file->private_data);
	return 0;
}
936
937static int i915_gpu_info_open(struct inode *inode, struct file *file)
938{
Chris Wilson090e5fe2017-03-28 14:14:07 +0100939 struct drm_i915_private *i915 = inode->i_private;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000940 struct i915_gpu_state *gpu;
Chris Wilsona0371212019-01-14 14:21:14 +0000941 intel_wakeref_t wakeref;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000942
Chris Wilsond4225a52019-01-14 14:21:23 +0000943 gpu = NULL;
944 with_intel_runtime_pm(i915, wakeref)
945 gpu = i915_capture_gpu_state(i915);
Chris Wilsone6154e42018-12-07 11:05:54 +0000946 if (IS_ERR(gpu))
947 return PTR_ERR(gpu);
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000948
949 file->private_data = gpu;
950 return 0;
951}
952
/*
 * debugfs file operations for i915_gpu_info: capture a fresh GPU state on
 * open, stream it via gpu_state_read(), release it on close.
 */
static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
Chris Wilson98a2f412016-10-12 10:05:18 +0100960
Daniel Vetterd5442302012-04-27 15:17:40 +0200961static ssize_t
962i915_error_state_write(struct file *filp,
963 const char __user *ubuf,
964 size_t cnt,
965 loff_t *ppos)
966{
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000967 struct i915_gpu_state *error = filp->private_data;
968
969 if (!error)
970 return 0;
Daniel Vetterd5442302012-04-27 15:17:40 +0200971
972 DRM_DEBUG_DRIVER("Resetting error state\n");
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000973 i915_reset_error_state(error->i915);
Daniel Vetterd5442302012-04-27 15:17:40 +0200974
975 return cnt;
976}
977
978static int i915_error_state_open(struct inode *inode, struct file *file)
979{
Chris Wilsone6154e42018-12-07 11:05:54 +0000980 struct i915_gpu_state *error;
981
982 error = i915_first_error_state(inode->i_private);
983 if (IS_ERR(error))
984 return PTR_ERR(error);
985
986 file->private_data = error;
Mika Kuoppalaedc3d882013-05-23 13:55:35 +0300987 return 0;
Daniel Vetterd5442302012-04-27 15:17:40 +0200988}
989
/*
 * debugfs file operations for i915_error_state: open attaches the first
 * captured error state, read streams it, write resets it.
 */
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
Chris Wilson98a2f412016-10-12 10:05:18 +0100998#endif
999
Deepak Sadb4bd12014-03-31 11:30:02 +05301000static int i915_frequency_info(struct seq_file *m, void *unused)
Jesse Barnesf97108d2010-01-29 11:27:07 -08001001{
David Weinehall36cdd012016-08-22 13:59:31 +03001002 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001003 struct intel_rps *rps = &dev_priv->gt_pm.rps;
Chris Wilsona0371212019-01-14 14:21:14 +00001004 intel_wakeref_t wakeref;
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02001005 int ret = 0;
1006
Chris Wilsona0371212019-01-14 14:21:14 +00001007 wakeref = intel_runtime_pm_get(dev_priv);
Jesse Barnesf97108d2010-01-29 11:27:07 -08001008
Lucas De Marchicf819ef2018-12-12 10:10:43 -08001009 if (IS_GEN(dev_priv, 5)) {
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001010 u16 rgvswctl = I915_READ16(MEMSWCTL);
1011 u16 rgvstat = I915_READ16(MEMSTAT_ILK);
1012
1013 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
1014 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
1015 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
1016 MEMSTAT_VID_SHIFT);
1017 seq_printf(m, "Current P-state: %d\n",
1018 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
David Weinehall36cdd012016-08-22 13:59:31 +03001019 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
Sagar Arun Kamble0d6fc922017-10-10 22:30:02 +01001020 u32 rpmodectl, freq_sts;
Wayne Boyer666a4532015-12-09 12:29:35 -08001021
Sagar Arun Kamble0d6fc922017-10-10 22:30:02 +01001022 rpmodectl = I915_READ(GEN6_RP_CONTROL);
1023 seq_printf(m, "Video Turbo Mode: %s\n",
1024 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1025 seq_printf(m, "HW control enabled: %s\n",
1026 yesno(rpmodectl & GEN6_RP_ENABLE));
1027 seq_printf(m, "SW control enabled: %s\n",
1028 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1029 GEN6_RP_MEDIA_SW_MODE));
1030
Chris Wilson337fa6e2019-04-26 09:17:20 +01001031 vlv_punit_get(dev_priv);
Wayne Boyer666a4532015-12-09 12:29:35 -08001032 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
Chris Wilson337fa6e2019-04-26 09:17:20 +01001033 vlv_punit_put(dev_priv);
1034
Wayne Boyer666a4532015-12-09 12:29:35 -08001035 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
1036 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
1037
1038 seq_printf(m, "actual GPU freq: %d MHz\n",
1039 intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
1040
1041 seq_printf(m, "current GPU freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001042 intel_gpu_freq(dev_priv, rps->cur_freq));
Wayne Boyer666a4532015-12-09 12:29:35 -08001043
1044 seq_printf(m, "max GPU freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001045 intel_gpu_freq(dev_priv, rps->max_freq));
Wayne Boyer666a4532015-12-09 12:29:35 -08001046
1047 seq_printf(m, "min GPU freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001048 intel_gpu_freq(dev_priv, rps->min_freq));
Wayne Boyer666a4532015-12-09 12:29:35 -08001049
1050 seq_printf(m, "idle GPU freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001051 intel_gpu_freq(dev_priv, rps->idle_freq));
Wayne Boyer666a4532015-12-09 12:29:35 -08001052
1053 seq_printf(m,
1054 "efficient (RPe) frequency: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001055 intel_gpu_freq(dev_priv, rps->efficient_freq));
David Weinehall36cdd012016-08-22 13:59:31 +03001056 } else if (INTEL_GEN(dev_priv) >= 6) {
Bob Paauwe35040562015-06-25 14:54:07 -07001057 u32 rp_state_limits;
1058 u32 gt_perf_status;
1059 u32 rp_state_cap;
Chris Wilson0d8f9492014-03-27 09:06:14 +00001060 u32 rpmodectl, rpinclimit, rpdeclimit;
Chris Wilson8e8c06c2013-08-26 19:51:01 -03001061 u32 rpstat, cagf, reqf;
Jesse Barnesccab5c82011-01-18 15:49:25 -08001062 u32 rpupei, rpcurup, rpprevup;
1063 u32 rpdownei, rpcurdown, rpprevdown;
Paulo Zanoni9dd3c602014-08-01 18:14:48 -03001064 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001065 int max_freq;
1066
Bob Paauwe35040562015-06-25 14:54:07 -07001067 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02001068 if (IS_GEN9_LP(dev_priv)) {
Bob Paauwe35040562015-06-25 14:54:07 -07001069 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
1070 gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
1071 } else {
1072 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
1073 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
1074 }
1075
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001076 /* RPSTAT1 is in the GT power well */
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07001077 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001078
Chris Wilson8e8c06c2013-08-26 19:51:01 -03001079 reqf = I915_READ(GEN6_RPNSWREQ);
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001080 if (INTEL_GEN(dev_priv) >= 9)
Akash Goel60260a52015-03-06 11:07:21 +05301081 reqf >>= 23;
1082 else {
1083 reqf &= ~GEN6_TURBO_DISABLE;
David Weinehall36cdd012016-08-22 13:59:31 +03001084 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
Akash Goel60260a52015-03-06 11:07:21 +05301085 reqf >>= 24;
1086 else
1087 reqf >>= 25;
1088 }
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001089 reqf = intel_gpu_freq(dev_priv, reqf);
Chris Wilson8e8c06c2013-08-26 19:51:01 -03001090
Chris Wilson0d8f9492014-03-27 09:06:14 +00001091 rpmodectl = I915_READ(GEN6_RP_CONTROL);
1092 rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
1093 rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
1094
Jesse Barnesccab5c82011-01-18 15:49:25 -08001095 rpstat = I915_READ(GEN6_RPSTAT1);
Akash Goeld6cda9c2016-04-23 00:05:46 +05301096 rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
1097 rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
1098 rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
1099 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
1100 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
1101 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
Tvrtko Ursulinc84b2702017-11-21 18:18:44 +00001102 cagf = intel_gpu_freq(dev_priv,
1103 intel_get_cagf(dev_priv, rpstat));
Jesse Barnesccab5c82011-01-18 15:49:25 -08001104
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07001105 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
Ben Widawskyd1ebd8162011-04-25 20:11:50 +01001106
Oscar Mateo6b7a6a72018-05-10 14:59:55 -07001107 if (INTEL_GEN(dev_priv) >= 11) {
1108 pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
1109 pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
1110 /*
1111 * The equivalent to the PM ISR & IIR cannot be read
1112 * without affecting the current state of the system
1113 */
1114 pm_isr = 0;
1115 pm_iir = 0;
1116 } else if (INTEL_GEN(dev_priv) >= 8) {
Paulo Zanoni9dd3c602014-08-01 18:14:48 -03001117 pm_ier = I915_READ(GEN8_GT_IER(2));
1118 pm_imr = I915_READ(GEN8_GT_IMR(2));
1119 pm_isr = I915_READ(GEN8_GT_ISR(2));
1120 pm_iir = I915_READ(GEN8_GT_IIR(2));
Oscar Mateo6b7a6a72018-05-10 14:59:55 -07001121 } else {
1122 pm_ier = I915_READ(GEN6_PMIER);
1123 pm_imr = I915_READ(GEN6_PMIMR);
1124 pm_isr = I915_READ(GEN6_PMISR);
1125 pm_iir = I915_READ(GEN6_PMIIR);
Paulo Zanoni9dd3c602014-08-01 18:14:48 -03001126 }
Oscar Mateo6b7a6a72018-05-10 14:59:55 -07001127 pm_mask = I915_READ(GEN6_PMINTRMSK);
1128
Sagar Arun Kamble960e5462017-10-10 22:29:59 +01001129 seq_printf(m, "Video Turbo Mode: %s\n",
1130 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1131 seq_printf(m, "HW control enabled: %s\n",
1132 yesno(rpmodectl & GEN6_RP_ENABLE));
1133 seq_printf(m, "SW control enabled: %s\n",
1134 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1135 GEN6_RP_MEDIA_SW_MODE));
Oscar Mateo6b7a6a72018-05-10 14:59:55 -07001136
1137 seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
1138 pm_ier, pm_imr, pm_mask);
1139 if (INTEL_GEN(dev_priv) <= 10)
1140 seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
1141 pm_isr, pm_iir);
Sagar Arun Kamble5dd04552017-03-11 08:07:00 +05301142 seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001143 rps->pm_intrmsk_mbz);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001144 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001145 seq_printf(m, "Render p-state ratio: %d\n",
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001146 (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001147 seq_printf(m, "Render p-state VID: %d\n",
1148 gt_perf_status & 0xff);
1149 seq_printf(m, "Render p-state limit: %d\n",
1150 rp_state_limits & 0xff);
Chris Wilson0d8f9492014-03-27 09:06:14 +00001151 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
1152 seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
1153 seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
1154 seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
Chris Wilson8e8c06c2013-08-26 19:51:01 -03001155 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
Ben Widawskyf82855d2013-01-29 12:00:15 -08001156 seq_printf(m, "CAGF: %dMHz\n", cagf);
Akash Goeld6cda9c2016-04-23 00:05:46 +05301157 seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
1158 rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
1159 seq_printf(m, "RP CUR UP: %d (%dus)\n",
1160 rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
1161 seq_printf(m, "RP PREV UP: %d (%dus)\n",
1162 rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
Chris Wilson60548c52018-07-31 14:26:29 +01001163 seq_printf(m, "Up threshold: %d%%\n",
1164 rps->power.up_threshold);
Chris Wilsond86ed342015-04-27 13:41:19 +01001165
Akash Goeld6cda9c2016-04-23 00:05:46 +05301166 seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
1167 rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
1168 seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
1169 rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
1170 seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
1171 rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
Chris Wilson60548c52018-07-31 14:26:29 +01001172 seq_printf(m, "Down threshold: %d%%\n",
1173 rps->power.down_threshold);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001174
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02001175 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
Bob Paauwe35040562015-06-25 14:54:07 -07001176 rp_state_cap >> 16) & 0xff;
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001177 max_freq *= (IS_GEN9_BC(dev_priv) ||
Oscar Mateo2b2874e2018-04-05 17:00:52 +03001178 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001179 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001180 intel_gpu_freq(dev_priv, max_freq));
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001181
1182 max_freq = (rp_state_cap & 0xff00) >> 8;
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001183 max_freq *= (IS_GEN9_BC(dev_priv) ||
Oscar Mateo2b2874e2018-04-05 17:00:52 +03001184 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001185 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001186 intel_gpu_freq(dev_priv, max_freq));
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001187
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02001188 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
Bob Paauwe35040562015-06-25 14:54:07 -07001189 rp_state_cap >> 0) & 0xff;
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001190 max_freq *= (IS_GEN9_BC(dev_priv) ||
Oscar Mateo2b2874e2018-04-05 17:00:52 +03001191 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001192 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001193 intel_gpu_freq(dev_priv, max_freq));
Ben Widawsky31c77382013-04-05 14:29:22 -07001194 seq_printf(m, "Max overclocked frequency: %dMHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001195 intel_gpu_freq(dev_priv, rps->max_freq));
Chris Wilsonaed242f2015-03-18 09:48:21 +00001196
Chris Wilsond86ed342015-04-27 13:41:19 +01001197 seq_printf(m, "Current freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001198 intel_gpu_freq(dev_priv, rps->cur_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001199 seq_printf(m, "Actual freq: %d MHz\n", cagf);
Chris Wilsonaed242f2015-03-18 09:48:21 +00001200 seq_printf(m, "Idle freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001201 intel_gpu_freq(dev_priv, rps->idle_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001202 seq_printf(m, "Min freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001203 intel_gpu_freq(dev_priv, rps->min_freq));
Chris Wilson29ecd78d2016-07-13 09:10:35 +01001204 seq_printf(m, "Boost freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001205 intel_gpu_freq(dev_priv, rps->boost_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001206 seq_printf(m, "Max freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001207 intel_gpu_freq(dev_priv, rps->max_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001208 seq_printf(m,
1209 "efficient (RPe) frequency: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001210 intel_gpu_freq(dev_priv, rps->efficient_freq));
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001211 } else {
Damien Lespiau267f0c92013-06-24 22:59:48 +01001212 seq_puts(m, "no P-state info available\n");
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001213 }
Jesse Barnesf97108d2010-01-29 11:27:07 -08001214
Ville Syrjälä49cd97a2017-02-07 20:33:45 +02001215 seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
Mika Kahola1170f282015-09-25 14:00:32 +03001216 seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
1217 seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
1218
Chris Wilsona0371212019-01-14 14:21:14 +00001219 intel_runtime_pm_put(dev_priv, wakeref);
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02001220 return ret;
Jesse Barnesf97108d2010-01-29 11:27:07 -08001221}
1222
Ben Widawskyd6369512016-09-20 16:54:32 +03001223static void i915_instdone_info(struct drm_i915_private *dev_priv,
1224 struct seq_file *m,
1225 struct intel_instdone *instdone)
1226{
Ben Widawskyf9e61372016-09-20 16:54:33 +03001227 int slice;
1228 int subslice;
1229
Ben Widawskyd6369512016-09-20 16:54:32 +03001230 seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1231 instdone->instdone);
1232
1233 if (INTEL_GEN(dev_priv) <= 3)
1234 return;
1235
1236 seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1237 instdone->slice_common);
1238
1239 if (INTEL_GEN(dev_priv) <= 6)
1240 return;
1241
Jani Nikulaa10f3612019-05-29 11:21:50 +03001242 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
Ben Widawskyf9e61372016-09-20 16:54:33 +03001243 seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1244 slice, subslice, instdone->sampler[slice][subslice]);
1245
Jani Nikulaa10f3612019-05-29 11:21:50 +03001246 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
Ben Widawskyf9e61372016-09-20 16:54:33 +03001247 seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1248 slice, subslice, instdone->row[slice][subslice]);
Ben Widawskyd6369512016-09-20 16:54:32 +03001249}
1250
Chris Wilsonf6544492015-01-26 18:03:04 +02001251static int i915_hangcheck_info(struct seq_file *m, void *unused)
1252{
David Weinehall36cdd012016-08-22 13:59:31 +03001253 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001254 struct intel_engine_cs *engine;
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00001255 u64 acthd[I915_NUM_ENGINES];
Ben Widawskyd6369512016-09-20 16:54:32 +03001256 struct intel_instdone instdone;
Chris Wilsona0371212019-01-14 14:21:14 +00001257 intel_wakeref_t wakeref;
Dave Gordonc3232b12016-03-23 18:19:53 +00001258 enum intel_engine_id id;
Chris Wilsonf6544492015-01-26 18:03:04 +02001259
Chris Wilson2caffbf2019-02-08 15:37:03 +00001260 seq_printf(m, "Reset flags: %lx\n", dev_priv->gpu_error.flags);
Chris Wilson8af29b02016-09-09 14:11:47 +01001261 if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
Chris Wilson2caffbf2019-02-08 15:37:03 +00001262 seq_puts(m, "\tWedged\n");
Chris Wilson8c185ec2017-03-16 17:13:02 +00001263 if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
Chris Wilson2caffbf2019-02-08 15:37:03 +00001264 seq_puts(m, "\tDevice (global) reset in progress\n");
Chris Wilson8af29b02016-09-09 14:11:47 +01001265
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00001266 if (!i915_modparams.enable_hangcheck) {
Chris Wilson8c185ec2017-03-16 17:13:02 +00001267 seq_puts(m, "Hangcheck disabled\n");
Chris Wilsonf6544492015-01-26 18:03:04 +02001268 return 0;
1269 }
1270
Chris Wilsond4225a52019-01-14 14:21:23 +00001271 with_intel_runtime_pm(dev_priv, wakeref) {
Chris Wilson519a0192019-05-08 09:06:25 +01001272 for_each_engine(engine, dev_priv, id)
Chris Wilsond4225a52019-01-14 14:21:23 +00001273 acthd[id] = intel_engine_get_active_head(engine);
Mika Kuoppalaebbc7542015-02-05 18:41:48 +02001274
Chris Wilson8a68d462019-03-05 18:03:30 +00001275 intel_engine_get_instdone(dev_priv->engine[RCS0], &instdone);
Mika Kuoppalaebbc7542015-02-05 18:41:48 +02001276 }
1277
Chris Wilson8352aea2017-03-03 09:00:56 +00001278 if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
1279 seq_printf(m, "Hangcheck active, timer fires in %dms\n",
Chris Wilsonf6544492015-01-26 18:03:04 +02001280 jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
1281 jiffies));
Chris Wilson8352aea2017-03-03 09:00:56 +00001282 else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
1283 seq_puts(m, "Hangcheck active, work pending\n");
1284 else
1285 seq_puts(m, "Hangcheck inactive\n");
Chris Wilsonf6544492015-01-26 18:03:04 +02001286
Chris Wilsonf73b5672017-03-02 15:03:56 +00001287 seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));
1288
Akash Goel3b3f1652016-10-13 22:44:48 +05301289 for_each_engine(engine, dev_priv, id) {
Chris Wilson519a0192019-05-08 09:06:25 +01001290 seq_printf(m, "%s: %d ms ago\n",
1291 engine->name,
Chris Wilsoneb8d0f52019-01-25 13:22:28 +00001292 jiffies_to_msecs(jiffies -
1293 engine->hangcheck.action_timestamp));
Mika Kuoppala3fe3b032016-11-18 15:09:04 +02001294
Chris Wilsonf6544492015-01-26 18:03:04 +02001295 seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001296 (long long)engine->hangcheck.acthd,
Dave Gordonc3232b12016-03-23 18:19:53 +00001297 (long long)acthd[id]);
Mika Kuoppala61642ff2015-12-01 17:56:12 +02001298
Chris Wilson8a68d462019-03-05 18:03:30 +00001299 if (engine->id == RCS0) {
Ben Widawskyd6369512016-09-20 16:54:32 +03001300 seq_puts(m, "\tinstdone read =\n");
Mika Kuoppala61642ff2015-12-01 17:56:12 +02001301
Ben Widawskyd6369512016-09-20 16:54:32 +03001302 i915_instdone_info(dev_priv, m, &instdone);
Mika Kuoppala61642ff2015-12-01 17:56:12 +02001303
Ben Widawskyd6369512016-09-20 16:54:32 +03001304 seq_puts(m, "\tinstdone accu =\n");
Mika Kuoppala61642ff2015-12-01 17:56:12 +02001305
Ben Widawskyd6369512016-09-20 16:54:32 +03001306 i915_instdone_info(dev_priv, m,
1307 &engine->hangcheck.instdone);
Mika Kuoppala61642ff2015-12-01 17:56:12 +02001308 }
Chris Wilsonf6544492015-01-26 18:03:04 +02001309 }
1310
1311 return 0;
1312}
1313
Michel Thierry061d06a2017-06-20 10:57:49 +01001314static int i915_reset_info(struct seq_file *m, void *unused)
1315{
1316 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1317 struct i915_gpu_error *error = &dev_priv->gpu_error;
1318 struct intel_engine_cs *engine;
1319 enum intel_engine_id id;
1320
1321 seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1322
1323 for_each_engine(engine, dev_priv, id) {
1324 seq_printf(m, "%s = %u\n", engine->name,
1325 i915_reset_engine_count(error, engine));
1326 }
1327
1328 return 0;
1329}
1330
Ben Widawsky4d855292011-12-12 19:34:16 -08001331static int ironlake_drpc_info(struct seq_file *m)
Jesse Barnesf97108d2010-01-29 11:27:07 -08001332{
David Weinehall36cdd012016-08-22 13:59:31 +03001333 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Ben Widawsky616fdb52011-10-05 11:44:54 -07001334 u32 rgvmodectl, rstdbyctl;
1335 u16 crstandvid;
Ben Widawsky616fdb52011-10-05 11:44:54 -07001336
Ben Widawsky616fdb52011-10-05 11:44:54 -07001337 rgvmodectl = I915_READ(MEMMODECTL);
1338 rstdbyctl = I915_READ(RSTDBYCTL);
1339 crstandvid = I915_READ16(CRSTANDVID);
1340
Jani Nikula742f4912015-09-03 11:16:09 +03001341 seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001342 seq_printf(m, "Boost freq: %d\n",
1343 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1344 MEMMODE_BOOST_FREQ_SHIFT);
1345 seq_printf(m, "HW control enabled: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001346 yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001347 seq_printf(m, "SW control enabled: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001348 yesno(rgvmodectl & MEMMODE_SWMODE_EN));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001349 seq_printf(m, "Gated voltage change: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001350 yesno(rgvmodectl & MEMMODE_RCLK_GATE));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001351 seq_printf(m, "Starting frequency: P%d\n",
1352 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
Jesse Barnes7648fa92010-05-20 14:28:11 -07001353 seq_printf(m, "Max P-state: P%d\n",
Jesse Barnesf97108d2010-01-29 11:27:07 -08001354 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
Jesse Barnes7648fa92010-05-20 14:28:11 -07001355 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1356 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1357 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1358 seq_printf(m, "Render standby enabled: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001359 yesno(!(rstdbyctl & RCX_SW_EXIT)));
Damien Lespiau267f0c92013-06-24 22:59:48 +01001360 seq_puts(m, "Current RS state: ");
Jesse Barnes88271da2011-01-05 12:01:24 -08001361 switch (rstdbyctl & RSX_STATUS_MASK) {
1362 case RSX_STATUS_ON:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001363 seq_puts(m, "on\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001364 break;
1365 case RSX_STATUS_RC1:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001366 seq_puts(m, "RC1\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001367 break;
1368 case RSX_STATUS_RC1E:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001369 seq_puts(m, "RC1E\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001370 break;
1371 case RSX_STATUS_RS1:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001372 seq_puts(m, "RS1\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001373 break;
1374 case RSX_STATUS_RS2:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001375 seq_puts(m, "RS2 (RC6)\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001376 break;
1377 case RSX_STATUS_RS3:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001378 seq_puts(m, "RC3 (RC6+)\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001379 break;
1380 default:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001381 seq_puts(m, "unknown\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001382 break;
1383 }
Jesse Barnesf97108d2010-01-29 11:27:07 -08001384
1385 return 0;
1386}
1387
Mika Kuoppalaf65367b2015-01-16 11:34:42 +02001388static int i915_forcewake_domains(struct seq_file *m, void *data)
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001389{
Chris Wilson233ebf52017-03-23 10:19:44 +00001390 struct drm_i915_private *i915 = node_to_i915(m->private);
Daniele Ceraolo Spuriof568eee2019-03-19 11:35:35 -07001391 struct intel_uncore *uncore = &i915->uncore;
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001392 struct intel_uncore_forcewake_domain *fw_domain;
Chris Wilsond2dc94b2017-03-23 10:19:41 +00001393 unsigned int tmp;
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001394
Chris Wilsond7a133d2017-09-07 14:44:41 +01001395 seq_printf(m, "user.bypass_count = %u\n",
Daniele Ceraolo Spuriof568eee2019-03-19 11:35:35 -07001396 uncore->user_forcewake.count);
Chris Wilsond7a133d2017-09-07 14:44:41 +01001397
Daniele Ceraolo Spuriof568eee2019-03-19 11:35:35 -07001398 for_each_fw_domain(fw_domain, uncore, tmp)
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001399 seq_printf(m, "%s.wake_count = %u\n",
Tvrtko Ursulin33c582c2016-04-07 17:04:33 +01001400 intel_uncore_forcewake_domain_to_str(fw_domain->id),
Chris Wilson233ebf52017-03-23 10:19:44 +00001401 READ_ONCE(fw_domain->wake_count));
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001402
1403 return 0;
1404}
1405
Mika Kuoppala13628772017-03-15 17:43:02 +02001406static void print_rc6_res(struct seq_file *m,
1407 const char *title,
1408 const i915_reg_t reg)
1409{
1410 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1411
1412 seq_printf(m, "%s %u (%llu us)\n",
1413 title, I915_READ(reg),
1414 intel_rc6_residency_us(dev_priv, reg));
1415}
1416
Deepak S669ab5a2014-01-10 15:18:26 +05301417static int vlv_drpc_info(struct seq_file *m)
1418{
David Weinehall36cdd012016-08-22 13:59:31 +03001419 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Sagar Arun Kamble0d6fc922017-10-10 22:30:02 +01001420 u32 rcctl1, pw_status;
Deepak S669ab5a2014-01-10 15:18:26 +05301421
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001422 pw_status = I915_READ(VLV_GTLC_PW_STATUS);
Deepak S669ab5a2014-01-10 15:18:26 +05301423 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1424
Deepak S669ab5a2014-01-10 15:18:26 +05301425 seq_printf(m, "RC6 Enabled: %s\n",
1426 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1427 GEN6_RC_CTL_EI_MODE(1))));
1428 seq_printf(m, "Render Power Well: %s\n",
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001429 (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
Deepak S669ab5a2014-01-10 15:18:26 +05301430 seq_printf(m, "Media Power Well: %s\n",
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001431 (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
Deepak S669ab5a2014-01-10 15:18:26 +05301432
Mika Kuoppala13628772017-03-15 17:43:02 +02001433 print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1434 print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
Imre Deak9cc19be2014-04-14 20:24:24 +03001435
Mika Kuoppalaf65367b2015-01-16 11:34:42 +02001436 return i915_forcewake_domains(m, NULL);
Deepak S669ab5a2014-01-10 15:18:26 +05301437}
1438
Ben Widawsky4d855292011-12-12 19:34:16 -08001439static int gen6_drpc_info(struct seq_file *m)
1440{
David Weinehall36cdd012016-08-22 13:59:31 +03001441 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Sagar Arun Kamble960e5462017-10-10 22:29:59 +01001442 u32 gt_core_status, rcctl1, rc6vids = 0;
Akash Goelf2dd7572016-06-27 20:10:01 +05301443 u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
Ben Widawsky4d855292011-12-12 19:34:16 -08001444
Ville Syrjälä75aa3f62015-10-22 15:34:56 +03001445 gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
Chris Wilsoned71f1b2013-07-19 20:36:56 +01001446 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
Ben Widawsky4d855292011-12-12 19:34:16 -08001447
Ben Widawsky4d855292011-12-12 19:34:16 -08001448 rcctl1 = I915_READ(GEN6_RC_CONTROL);
David Weinehall36cdd012016-08-22 13:59:31 +03001449 if (INTEL_GEN(dev_priv) >= 9) {
Akash Goelf2dd7572016-06-27 20:10:01 +05301450 gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
1451 gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
1452 }
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001453
Chris Wilsonebb5eb72019-04-26 09:17:21 +01001454 if (INTEL_GEN(dev_priv) <= 7)
Imre Deak51cc9ad2018-02-08 19:41:02 +02001455 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
Ville Syrjäläd284d512019-05-21 19:40:24 +03001456 &rc6vids, NULL);
Ben Widawsky4d855292011-12-12 19:34:16 -08001457
Eric Anholtfff24e22012-01-23 16:14:05 -08001458 seq_printf(m, "RC1e Enabled: %s\n",
Ben Widawsky4d855292011-12-12 19:34:16 -08001459 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1460 seq_printf(m, "RC6 Enabled: %s\n",
1461 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
David Weinehall36cdd012016-08-22 13:59:31 +03001462 if (INTEL_GEN(dev_priv) >= 9) {
Akash Goelf2dd7572016-06-27 20:10:01 +05301463 seq_printf(m, "Render Well Gating Enabled: %s\n",
1464 yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
1465 seq_printf(m, "Media Well Gating Enabled: %s\n",
1466 yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
1467 }
Ben Widawsky4d855292011-12-12 19:34:16 -08001468 seq_printf(m, "Deep RC6 Enabled: %s\n",
1469 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1470 seq_printf(m, "Deepest RC6 Enabled: %s\n",
1471 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
Damien Lespiau267f0c92013-06-24 22:59:48 +01001472 seq_puts(m, "Current RC state: ");
Ben Widawsky4d855292011-12-12 19:34:16 -08001473 switch (gt_core_status & GEN6_RCn_MASK) {
1474 case GEN6_RC0:
1475 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
Damien Lespiau267f0c92013-06-24 22:59:48 +01001476 seq_puts(m, "Core Power Down\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001477 else
Damien Lespiau267f0c92013-06-24 22:59:48 +01001478 seq_puts(m, "on\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001479 break;
1480 case GEN6_RC3:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001481 seq_puts(m, "RC3\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001482 break;
1483 case GEN6_RC6:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001484 seq_puts(m, "RC6\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001485 break;
1486 case GEN6_RC7:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001487 seq_puts(m, "RC7\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001488 break;
1489 default:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001490 seq_puts(m, "Unknown\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001491 break;
1492 }
1493
1494 seq_printf(m, "Core Power Down: %s\n",
1495 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
David Weinehall36cdd012016-08-22 13:59:31 +03001496 if (INTEL_GEN(dev_priv) >= 9) {
Akash Goelf2dd7572016-06-27 20:10:01 +05301497 seq_printf(m, "Render Power Well: %s\n",
1498 (gen9_powergate_status &
1499 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
1500 seq_printf(m, "Media Power Well: %s\n",
1501 (gen9_powergate_status &
1502 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
1503 }
Ben Widawskycce66a22012-03-27 18:59:38 -07001504
1505 /* Not exactly sure what this is */
Mika Kuoppala13628772017-03-15 17:43:02 +02001506 print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
1507 GEN6_GT_GFX_RC6_LOCKED);
1508 print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
1509 print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
1510 print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
Ben Widawskycce66a22012-03-27 18:59:38 -07001511
Imre Deak51cc9ad2018-02-08 19:41:02 +02001512 if (INTEL_GEN(dev_priv) <= 7) {
1513 seq_printf(m, "RC6 voltage: %dmV\n",
1514 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1515 seq_printf(m, "RC6+ voltage: %dmV\n",
1516 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1517 seq_printf(m, "RC6++ voltage: %dmV\n",
1518 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1519 }
1520
Akash Goelf2dd7572016-06-27 20:10:01 +05301521 return i915_forcewake_domains(m, NULL);
Ben Widawsky4d855292011-12-12 19:34:16 -08001522}
1523
1524static int i915_drpc_info(struct seq_file *m, void *unused)
1525{
David Weinehall36cdd012016-08-22 13:59:31 +03001526 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00001527 intel_wakeref_t wakeref;
Chris Wilsond4225a52019-01-14 14:21:23 +00001528 int err = -ENODEV;
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001529
Chris Wilsond4225a52019-01-14 14:21:23 +00001530 with_intel_runtime_pm(dev_priv, wakeref) {
1531 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1532 err = vlv_drpc_info(m);
1533 else if (INTEL_GEN(dev_priv) >= 6)
1534 err = gen6_drpc_info(m);
1535 else
1536 err = ironlake_drpc_info(m);
1537 }
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001538
1539 return err;
Ben Widawsky4d855292011-12-12 19:34:16 -08001540}
1541
Daniel Vetter9a851782015-06-18 10:30:22 +02001542static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1543{
David Weinehall36cdd012016-08-22 13:59:31 +03001544 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Daniel Vetter9a851782015-06-18 10:30:22 +02001545
1546 seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1547 dev_priv->fb_tracking.busy_bits);
1548
1549 seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1550 dev_priv->fb_tracking.flip_bits);
1551
1552 return 0;
1553}
1554
Jesse Barnesb5e50c32010-02-05 12:42:41 -08001555static int i915_fbc_status(struct seq_file *m, void *unused)
1556{
David Weinehall36cdd012016-08-22 13:59:31 +03001557 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilson31388722017-12-20 20:58:48 +00001558 struct intel_fbc *fbc = &dev_priv->fbc;
Chris Wilsona0371212019-01-14 14:21:14 +00001559 intel_wakeref_t wakeref;
Jesse Barnesb5e50c32010-02-05 12:42:41 -08001560
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00001561 if (!HAS_FBC(dev_priv))
1562 return -ENODEV;
Jesse Barnesb5e50c32010-02-05 12:42:41 -08001563
Chris Wilsona0371212019-01-14 14:21:14 +00001564 wakeref = intel_runtime_pm_get(dev_priv);
Chris Wilson31388722017-12-20 20:58:48 +00001565 mutex_lock(&fbc->lock);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001566
Paulo Zanoni0e631ad2015-10-14 17:45:36 -03001567 if (intel_fbc_is_active(dev_priv))
Damien Lespiau267f0c92013-06-24 22:59:48 +01001568 seq_puts(m, "FBC enabled\n");
Paulo Zanoni2e8144a2015-06-12 14:36:20 -03001569 else
Chris Wilson31388722017-12-20 20:58:48 +00001570 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1571
Ville Syrjälä3fd5d1e2017-06-06 15:43:18 +03001572 if (intel_fbc_is_active(dev_priv)) {
1573 u32 mask;
1574
1575 if (INTEL_GEN(dev_priv) >= 8)
1576 mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1577 else if (INTEL_GEN(dev_priv) >= 7)
1578 mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1579 else if (INTEL_GEN(dev_priv) >= 5)
1580 mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1581 else if (IS_G4X(dev_priv))
1582 mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1583 else
1584 mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1585 FBC_STAT_COMPRESSED);
1586
1587 seq_printf(m, "Compressing: %s\n", yesno(mask));
Paulo Zanoni0fc6a9d2016-10-21 13:55:46 -02001588 }
Paulo Zanoni31b9df12015-06-12 14:36:18 -03001589
Chris Wilson31388722017-12-20 20:58:48 +00001590 mutex_unlock(&fbc->lock);
Chris Wilsona0371212019-01-14 14:21:14 +00001591 intel_runtime_pm_put(dev_priv, wakeref);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001592
Jesse Barnesb5e50c32010-02-05 12:42:41 -08001593 return 0;
1594}
1595
Ville Syrjälä4127dc42017-06-06 15:44:12 +03001596static int i915_fbc_false_color_get(void *data, u64 *val)
Rodrigo Vivida46f932014-08-01 02:04:45 -07001597{
David Weinehall36cdd012016-08-22 13:59:31 +03001598 struct drm_i915_private *dev_priv = data;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001599
David Weinehall36cdd012016-08-22 13:59:31 +03001600 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
Rodrigo Vivida46f932014-08-01 02:04:45 -07001601 return -ENODEV;
1602
Rodrigo Vivida46f932014-08-01 02:04:45 -07001603 *val = dev_priv->fbc.false_color;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001604
1605 return 0;
1606}
1607
Ville Syrjälä4127dc42017-06-06 15:44:12 +03001608static int i915_fbc_false_color_set(void *data, u64 val)
Rodrigo Vivida46f932014-08-01 02:04:45 -07001609{
David Weinehall36cdd012016-08-22 13:59:31 +03001610 struct drm_i915_private *dev_priv = data;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001611 u32 reg;
1612
David Weinehall36cdd012016-08-22 13:59:31 +03001613 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
Rodrigo Vivida46f932014-08-01 02:04:45 -07001614 return -ENODEV;
1615
Paulo Zanoni25ad93f2015-07-02 19:25:10 -03001616 mutex_lock(&dev_priv->fbc.lock);
Rodrigo Vivida46f932014-08-01 02:04:45 -07001617
1618 reg = I915_READ(ILK_DPFC_CONTROL);
1619 dev_priv->fbc.false_color = val;
1620
1621 I915_WRITE(ILK_DPFC_CONTROL, val ?
1622 (reg | FBC_CTL_FALSE_COLOR) :
1623 (reg & ~FBC_CTL_FALSE_COLOR));
1624
Paulo Zanoni25ad93f2015-07-02 19:25:10 -03001625 mutex_unlock(&dev_priv->fbc.lock);
Rodrigo Vivida46f932014-08-01 02:04:45 -07001626 return 0;
1627}
1628
Ville Syrjälä4127dc42017-06-06 15:44:12 +03001629DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
1630 i915_fbc_false_color_get, i915_fbc_false_color_set,
Rodrigo Vivida46f932014-08-01 02:04:45 -07001631 "%llu\n");
1632
Paulo Zanoni92d44622013-05-31 16:33:24 -03001633static int i915_ips_status(struct seq_file *m, void *unused)
1634{
David Weinehall36cdd012016-08-22 13:59:31 +03001635 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00001636 intel_wakeref_t wakeref;
Paulo Zanoni92d44622013-05-31 16:33:24 -03001637
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00001638 if (!HAS_IPS(dev_priv))
1639 return -ENODEV;
Paulo Zanoni92d44622013-05-31 16:33:24 -03001640
Chris Wilsona0371212019-01-14 14:21:14 +00001641 wakeref = intel_runtime_pm_get(dev_priv);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001642
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001643 seq_printf(m, "Enabled by kernel parameter: %s\n",
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00001644 yesno(i915_modparams.enable_ips));
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001645
David Weinehall36cdd012016-08-22 13:59:31 +03001646 if (INTEL_GEN(dev_priv) >= 8) {
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001647 seq_puts(m, "Currently: unknown\n");
1648 } else {
1649 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1650 seq_puts(m, "Currently: enabled\n");
1651 else
1652 seq_puts(m, "Currently: disabled\n");
1653 }
Paulo Zanoni92d44622013-05-31 16:33:24 -03001654
Chris Wilsona0371212019-01-14 14:21:14 +00001655 intel_runtime_pm_put(dev_priv, wakeref);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001656
Paulo Zanoni92d44622013-05-31 16:33:24 -03001657 return 0;
1658}
1659
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001660static int i915_sr_status(struct seq_file *m, void *unused)
1661{
David Weinehall36cdd012016-08-22 13:59:31 +03001662 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00001663 intel_wakeref_t wakeref;
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001664 bool sr_enabled = false;
1665
Chris Wilson0e6e0be2019-01-14 14:21:24 +00001666 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001667
Chris Wilson7342a722017-03-09 14:20:49 +00001668 if (INTEL_GEN(dev_priv) >= 9)
1669 /* no global SR status; inspect per-plane WM */;
1670 else if (HAS_PCH_SPLIT(dev_priv))
Chris Wilson5ba2aaa2010-08-19 18:04:08 +01001671 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
Jani Nikulac0f86832016-12-07 12:13:04 +02001672 else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
David Weinehall36cdd012016-08-22 13:59:31 +03001673 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001674 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
David Weinehall36cdd012016-08-22 13:59:31 +03001675 else if (IS_I915GM(dev_priv))
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001676 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
David Weinehall36cdd012016-08-22 13:59:31 +03001677 else if (IS_PINEVIEW(dev_priv))
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001678 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
David Weinehall36cdd012016-08-22 13:59:31 +03001679 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
Ander Conselvan de Oliveira77b64552015-06-02 14:17:47 +03001680 sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001681
Chris Wilson0e6e0be2019-01-14 14:21:24 +00001682 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001683
Tvrtko Ursulin08c4d7f2016-11-17 12:30:14 +00001684 seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
Jesse Barnes4a9bef32010-02-05 12:47:35 -08001685
1686 return 0;
1687}
1688
Jesse Barnes7648fa92010-05-20 14:28:11 -07001689static int i915_emon_status(struct seq_file *m, void *unused)
1690{
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001691 struct drm_i915_private *i915 = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00001692 intel_wakeref_t wakeref;
Chris Wilsonde227ef2010-07-03 07:58:38 +01001693
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001694 if (!IS_GEN(i915, 5))
Chris Wilson582be6b2012-04-30 19:35:02 +01001695 return -ENODEV;
1696
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001697 with_intel_runtime_pm(i915, wakeref) {
1698 unsigned long temp, chipset, gfx;
Jesse Barnes7648fa92010-05-20 14:28:11 -07001699
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001700 temp = i915_mch_val(i915);
1701 chipset = i915_chipset_val(i915);
1702 gfx = i915_gfx_val(i915);
Chris Wilsona0371212019-01-14 14:21:14 +00001703
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001704 seq_printf(m, "GMCH temp: %ld\n", temp);
1705 seq_printf(m, "Chipset power: %ld\n", chipset);
1706 seq_printf(m, "GFX power: %ld\n", gfx);
1707 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1708 }
Jesse Barnes7648fa92010-05-20 14:28:11 -07001709
1710 return 0;
1711}
1712
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001713static int i915_ring_freq_table(struct seq_file *m, void *unused)
1714{
David Weinehall36cdd012016-08-22 13:59:31 +03001715 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001716 struct intel_rps *rps = &dev_priv->gt_pm.rps;
Akash Goelf936ec32015-06-29 14:50:22 +05301717 unsigned int max_gpu_freq, min_gpu_freq;
Chris Wilsona0371212019-01-14 14:21:14 +00001718 intel_wakeref_t wakeref;
Chris Wilsond586b5f2018-03-08 14:26:48 +00001719 int gpu_freq, ia_freq;
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001720
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00001721 if (!HAS_LLC(dev_priv))
1722 return -ENODEV;
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001723
Chris Wilsond586b5f2018-03-08 14:26:48 +00001724 min_gpu_freq = rps->min_freq;
1725 max_gpu_freq = rps->max_freq;
Oscar Mateo2b2874e2018-04-05 17:00:52 +03001726 if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
Akash Goelf936ec32015-06-29 14:50:22 +05301727 /* Convert GT frequency to 50 HZ units */
Chris Wilsond586b5f2018-03-08 14:26:48 +00001728 min_gpu_freq /= GEN9_FREQ_SCALER;
1729 max_gpu_freq /= GEN9_FREQ_SCALER;
Akash Goelf936ec32015-06-29 14:50:22 +05301730 }
1731
Damien Lespiau267f0c92013-06-24 22:59:48 +01001732 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001733
Chris Wilsonebb5eb72019-04-26 09:17:21 +01001734 wakeref = intel_runtime_pm_get(dev_priv);
Akash Goelf936ec32015-06-29 14:50:22 +05301735 for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
Ben Widawsky42c05262012-09-26 10:34:00 -07001736 ia_freq = gpu_freq;
1737 sandybridge_pcode_read(dev_priv,
1738 GEN6_PCODE_READ_MIN_FREQ_TABLE,
Ville Syrjäläd284d512019-05-21 19:40:24 +03001739 &ia_freq, NULL);
Chris Wilson3ebecd02013-04-12 19:10:13 +01001740 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
Akash Goelf936ec32015-06-29 14:50:22 +05301741 intel_gpu_freq(dev_priv, (gpu_freq *
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001742 (IS_GEN9_BC(dev_priv) ||
Oscar Mateo2b2874e2018-04-05 17:00:52 +03001743 INTEL_GEN(dev_priv) >= 10 ?
Rodrigo Vivib976dc52017-01-23 10:32:37 -08001744 GEN9_FREQ_SCALER : 1))),
Chris Wilson3ebecd02013-04-12 19:10:13 +01001745 ((ia_freq >> 0) & 0xff) * 100,
1746 ((ia_freq >> 8) & 0xff) * 100);
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001747 }
Chris Wilsona0371212019-01-14 14:21:14 +00001748 intel_runtime_pm_put(dev_priv, wakeref);
Chris Wilsonebb5eb72019-04-26 09:17:21 +01001749
1750 return 0;
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07001751}
1752
Chris Wilson44834a62010-08-19 16:09:23 +01001753static int i915_opregion(struct seq_file *m, void *unused)
1754{
David Weinehall36cdd012016-08-22 13:59:31 +03001755 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1756 struct drm_device *dev = &dev_priv->drm;
Chris Wilson44834a62010-08-19 16:09:23 +01001757 struct intel_opregion *opregion = &dev_priv->opregion;
1758 int ret;
1759
1760 ret = mutex_lock_interruptible(&dev->struct_mutex);
1761 if (ret)
Daniel Vetter0d38f002012-04-21 22:49:10 +02001762 goto out;
Chris Wilson44834a62010-08-19 16:09:23 +01001763
Jani Nikula2455a8e2015-12-14 12:50:53 +02001764 if (opregion->header)
1765 seq_write(m, opregion->header, OPREGION_SIZE);
Chris Wilson44834a62010-08-19 16:09:23 +01001766
1767 mutex_unlock(&dev->struct_mutex);
1768
Daniel Vetter0d38f002012-04-21 22:49:10 +02001769out:
Chris Wilson44834a62010-08-19 16:09:23 +01001770 return 0;
1771}
1772
Jani Nikulaada8f952015-12-15 13:17:12 +02001773static int i915_vbt(struct seq_file *m, void *unused)
1774{
David Weinehall36cdd012016-08-22 13:59:31 +03001775 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
Jani Nikulaada8f952015-12-15 13:17:12 +02001776
1777 if (opregion->vbt)
1778 seq_write(m, opregion->vbt, opregion->vbt_size);
1779
1780 return 0;
1781}
1782
Chris Wilson37811fc2010-08-25 22:45:57 +01001783static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1784{
David Weinehall36cdd012016-08-22 13:59:31 +03001785 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1786 struct drm_device *dev = &dev_priv->drm;
Namrta Salonieb13b8402015-11-27 13:43:11 +05301787 struct intel_framebuffer *fbdev_fb = NULL;
Daniel Vetter3a58ee12015-07-10 19:02:51 +02001788 struct drm_framebuffer *drm_fb;
Chris Wilson188c1ab2016-04-03 14:14:20 +01001789 int ret;
1790
1791 ret = mutex_lock_interruptible(&dev->struct_mutex);
1792 if (ret)
1793 return ret;
Chris Wilson37811fc2010-08-25 22:45:57 +01001794
Daniel Vetter06957262015-08-10 13:34:08 +02001795#ifdef CONFIG_DRM_FBDEV_EMULATION
Daniel Vetter346fb4e2017-07-06 15:00:20 +02001796 if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
David Weinehall36cdd012016-08-22 13:59:31 +03001797 fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
Chris Wilson37811fc2010-08-25 22:45:57 +01001798
Chris Wilson25bcce92016-07-02 15:36:00 +01001799 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1800 fbdev_fb->base.width,
1801 fbdev_fb->base.height,
Ville Syrjäläb00c6002016-12-14 23:31:35 +02001802 fbdev_fb->base.format->depth,
Ville Syrjälä272725c2016-12-14 23:32:20 +02001803 fbdev_fb->base.format->cpp[0] * 8,
Ville Syrjäläbae781b2016-11-16 13:33:16 +02001804 fbdev_fb->base.modifier,
Chris Wilson25bcce92016-07-02 15:36:00 +01001805 drm_framebuffer_read_refcount(&fbdev_fb->base));
Daniel Stonea5ff7a42018-05-18 15:30:07 +01001806 describe_obj(m, intel_fb_obj(&fbdev_fb->base));
Chris Wilson25bcce92016-07-02 15:36:00 +01001807 seq_putc(m, '\n');
1808 }
Daniel Vetter4520f532013-10-09 09:18:51 +02001809#endif
Chris Wilson37811fc2010-08-25 22:45:57 +01001810
Daniel Vetter4b096ac2012-12-10 21:19:18 +01001811 mutex_lock(&dev->mode_config.fb_lock);
Daniel Vetter3a58ee12015-07-10 19:02:51 +02001812 drm_for_each_fb(drm_fb, dev) {
Namrta Salonieb13b8402015-11-27 13:43:11 +05301813 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1814 if (fb == fbdev_fb)
Chris Wilson37811fc2010-08-25 22:45:57 +01001815 continue;
1816
Tvrtko Ursulinc1ca506d2015-02-10 17:16:07 +00001817 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
Chris Wilson37811fc2010-08-25 22:45:57 +01001818 fb->base.width,
1819 fb->base.height,
Ville Syrjäläb00c6002016-12-14 23:31:35 +02001820 fb->base.format->depth,
Ville Syrjälä272725c2016-12-14 23:32:20 +02001821 fb->base.format->cpp[0] * 8,
Ville Syrjäläbae781b2016-11-16 13:33:16 +02001822 fb->base.modifier,
Dave Airlie747a5982016-04-15 15:10:35 +10001823 drm_framebuffer_read_refcount(&fb->base));
Daniel Stonea5ff7a42018-05-18 15:30:07 +01001824 describe_obj(m, intel_fb_obj(&fb->base));
Damien Lespiau267f0c92013-06-24 22:59:48 +01001825 seq_putc(m, '\n');
Chris Wilson37811fc2010-08-25 22:45:57 +01001826 }
Daniel Vetter4b096ac2012-12-10 21:19:18 +01001827 mutex_unlock(&dev->mode_config.fb_lock);
Chris Wilson188c1ab2016-04-03 14:14:20 +01001828 mutex_unlock(&dev->struct_mutex);
Chris Wilson37811fc2010-08-25 22:45:57 +01001829
1830 return 0;
1831}
1832
Chris Wilson7e37f882016-08-02 22:50:21 +01001833static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001834{
Chris Wilsonef5032a2018-03-07 13:42:24 +00001835 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1836 ring->space, ring->head, ring->tail, ring->emit);
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001837}
1838
/*
 * debugfs entry: walk dev_priv->contexts.list and, for every GEM context,
 * print its hw_id (if assigned), the owning process name/pid (or
 * "(kernel)" / "(deleted)"), the remap_slice flag, and a per-engine
 * summary of the pinned context state object and its ringbuffer.
 */
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_gem_context *ctx;
	int ret;

	/* struct_mutex protects the context list; interruptible so a
	 * blocked reader can be killed. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		seq_puts(m, "HW context ");
		/* hw_id_link is non-empty only while a hw_id is assigned */
		if (!list_empty(&ctx->hw_id_link))
			seq_printf(m, "%x [pin %u]", ctx->hw_id,
				   atomic_read(&ctx->hw_id_pin_count));
		if (ctx->pid) {
			struct task_struct *task;

			/* Hold a task reference while reading comm/pid */
			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		/* 'R' when ctx->remap_slice is set, 'r' otherwise */
		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		/* lock_engines pairs with the unlock below; iterate every
		 * intel_context bound to this GEM context */
		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			seq_printf(m, "%s: ", ce->engine->name);
			if (ce->state)
				describe_obj(m, ce->state->obj);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}
		i915_gem_context_unlock_engines(ctx);

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1894
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001895static const char *swizzle_string(unsigned swizzle)
1896{
Damien Lespiauaee56cf2013-06-24 22:59:49 +01001897 switch (swizzle) {
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001898 case I915_BIT_6_SWIZZLE_NONE:
1899 return "none";
1900 case I915_BIT_6_SWIZZLE_9:
1901 return "bit9";
1902 case I915_BIT_6_SWIZZLE_9_10:
1903 return "bit9/bit10";
1904 case I915_BIT_6_SWIZZLE_9_11:
1905 return "bit9/bit11";
1906 case I915_BIT_6_SWIZZLE_9_10_11:
1907 return "bit9/bit10/bit11";
1908 case I915_BIT_6_SWIZZLE_9_17:
1909 return "bit9/bit17";
1910 case I915_BIT_6_SWIZZLE_9_10_17:
1911 return "bit9/bit10/bit17";
1912 case I915_BIT_6_SWIZZLE_UNKNOWN:
Masanari Iida8a168ca2012-12-29 02:00:09 +09001913 return "unknown";
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001914 }
1915
1916 return "bug";
1917}
1918
/*
 * debugfs entry: report the detected bit-6 swizzle mode for X/Y tiling
 * and dump the generation-specific memory-arbitration registers that
 * the swizzle detection is derived from.
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	/* Registers below require the device to be awake */
	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		/* gen3/4: DRAM channel configuration registers */
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		/* gen6+: per-channel DIMM config plus arbitration control */
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		/* the arbiter mode register moved on gen8 */
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
1966
Chris Wilson7466c292016-08-15 09:49:33 +01001967static const char *rps_power_to_str(unsigned int power)
1968{
1969 static const char * const strings[] = {
1970 [LOW_POWER] = "low power",
1971 [BETWEEN] = "mixed",
1972 [HIGH_POWER] = "high power",
1973 };
1974
1975 if (power >= ARRAY_SIZE(strings) || !strings[power])
1976 return "unknown";
1977
1978 return strings[power];
1979}
1980
/*
 * debugfs entry: dump the RPS (render power/frequency scaling) state:
 * requested vs actual frequency, the soft/hard frequency limits, boost
 * accounting, and — when the GPU is awake — the hardware up/down
 * autotuning counters.
 */
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 act_freq = rps->cur_freq;
	intel_wakeref_t wakeref;

	/* Read the real hardware frequency only if the device is already
	 * awake; otherwise fall back to the cached cur_freq above. */
	with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			/* vlv/chv expose the frequency via the punit;
			 * bits 15:8 of GPU_FREQ_STS hold the status */
			vlv_punit_get(dev_priv);
			act_freq = vlv_punit_read(dev_priv,
						  PUNIT_REG_GPU_FREQ_STS);
			vlv_punit_put(dev_priv);
			act_freq = (act_freq >> 8) & 0xff;
		} else {
			act_freq = intel_get_cagf(dev_priv,
						  I915_READ(GEN6_RPSTAT1));
		}
	}

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(dev_priv, rps->cur_freq),
		   intel_gpu_freq(dev_priv, act_freq));
	seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, rps->min_freq),
		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq));
	seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, rps->idle_freq),
		   intel_gpu_freq(dev_priv, rps->efficient_freq),
		   intel_gpu_freq(dev_priv, rps->boost_freq));

	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));

	if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		/* Raw (_FW) reads require forcewake to be held manually */
		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		/* Guard against a zero evaluation interval before dividing */
		seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
2046
Ben Widawsky63573eb2013-07-04 11:02:07 -07002047static int i915_llc(struct seq_file *m, void *data)
2048{
David Weinehall36cdd012016-08-22 13:59:31 +03002049 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Mika Kuoppala3accaf72016-04-13 17:26:43 +03002050 const bool edram = INTEL_GEN(dev_priv) > 8;
Ben Widawsky63573eb2013-07-04 11:02:07 -07002051
David Weinehall36cdd012016-08-22 13:59:31 +03002052 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
Daniele Ceraolo Spuriof6ac9932019-03-28 10:45:32 -07002053 seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
2054 dev_priv->edram_size_mb);
Ben Widawsky63573eb2013-07-04 11:02:07 -07002055
2056 return 0;
2057}
2058
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002059static int i915_huc_load_status_info(struct seq_file *m, void *data)
2060{
2061 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00002062 intel_wakeref_t wakeref;
Michal Wajdeczko56ffc742017-10-17 09:44:49 +00002063 struct drm_printer p;
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002064
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002065 if (!HAS_HUC(dev_priv))
2066 return -ENODEV;
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002067
Michal Wajdeczko56ffc742017-10-17 09:44:49 +00002068 p = drm_seq_file_printer(m);
2069 intel_uc_fw_dump(&dev_priv->huc.fw, &p);
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002070
Chris Wilsond4225a52019-01-14 14:21:23 +00002071 with_intel_runtime_pm(dev_priv, wakeref)
2072 seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002073
2074 return 0;
2075}
2076
/*
 * debugfs entry: dump the GuC firmware descriptor, then — with the
 * device awake — decode GUC_STATUS into its bootrom/uKernel/MIA fields
 * and print the 16 soft-scratch registers.
 */
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct drm_printer p;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->guc.fw, &p);

	/* All register reads below require the device to be awake */
	with_intel_runtime_pm(dev_priv, wakeref) {
		u32 tmp = I915_READ(GUC_STATUS);
		u32 i;

		seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
		seq_printf(m, "\tBootrom status = 0x%x\n",
			   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
		seq_printf(m, "\tuKernel status = 0x%x\n",
			   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
		seq_printf(m, "\tMIA Core status = 0x%x\n",
			   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
		seq_puts(m, "\nScratch registers:\n");
		for (i = 0; i < 16; i++) {
			seq_printf(m, "\t%2d: \t0x%x\n",
				   i, I915_READ(SOFT_SCRATCH(i)));
		}
	}

	return 0;
}
2109
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01002110static const char *
2111stringify_guc_log_type(enum guc_log_buffer_type type)
2112{
2113 switch (type) {
2114 case GUC_ISR_LOG_BUFFER:
2115 return "ISR";
2116 case GUC_DPC_LOG_BUFFER:
2117 return "DPC";
2118 case GUC_CRASH_DUMP_LOG_BUFFER:
2119 return "CRASH";
2120 default:
2121 MISSING_CASE(type);
2122 }
2123
2124 return "";
2125}
2126
Akash Goel5aa1ee42016-10-12 21:54:36 +05302127static void i915_guc_log_info(struct seq_file *m,
2128 struct drm_i915_private *dev_priv)
2129{
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01002130 struct intel_guc_log *log = &dev_priv->guc.log;
2131 enum guc_log_buffer_type type;
2132
2133 if (!intel_guc_log_relay_enabled(log)) {
2134 seq_puts(m, "GuC log relay disabled\n");
2135 return;
2136 }
Akash Goel5aa1ee42016-10-12 21:54:36 +05302137
Michał Winiarskidb557992018-03-19 10:53:43 +01002138 seq_puts(m, "GuC logging stats:\n");
Akash Goel5aa1ee42016-10-12 21:54:36 +05302139
Michał Winiarski6a96be22018-03-19 10:53:42 +01002140 seq_printf(m, "\tRelay full count: %u\n",
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01002141 log->relay.full_count);
2142
2143 for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
2144 seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
2145 stringify_guc_log_type(type),
2146 log->stats[type].flush,
2147 log->stats[type].sampled_overflow);
2148 }
Akash Goel5aa1ee42016-10-12 21:54:36 +05302149}
2150
Dave Gordon8b417c22015-08-12 15:43:44 +01002151static void i915_guc_client_info(struct seq_file *m,
2152 struct drm_i915_private *dev_priv,
Sagar Arun Kamble5afc8b42017-11-16 19:02:40 +05302153 struct intel_guc_client *client)
Dave Gordon8b417c22015-08-12 15:43:44 +01002154{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002155 struct intel_engine_cs *engine;
Dave Gordonc18468c2016-08-09 15:19:22 +01002156 enum intel_engine_id id;
Jani Nikulae5315212019-01-16 11:15:23 +02002157 u64 tot = 0;
Dave Gordon8b417c22015-08-12 15:43:44 +01002158
Oscar Mateob09935a2017-03-22 10:39:53 -07002159 seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2160 client->priority, client->stage_id, client->proc_desc_offset);
Michał Winiarski59db36c2017-09-14 12:51:23 +02002161 seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2162 client->doorbell_id, client->doorbell_offset);
Dave Gordon8b417c22015-08-12 15:43:44 +01002163
Akash Goel3b3f1652016-10-13 22:44:48 +05302164 for_each_engine(engine, dev_priv, id) {
Dave Gordonc18468c2016-08-09 15:19:22 +01002165 u64 submissions = client->submissions[id];
2166 tot += submissions;
Dave Gordon8b417c22015-08-12 15:43:44 +01002167 seq_printf(m, "\tSubmissions: %llu %s\n",
Dave Gordonc18468c2016-08-09 15:19:22 +01002168 submissions, engine->name);
Dave Gordon8b417c22015-08-12 15:43:44 +01002169 }
2170 seq_printf(m, "\tTotal: %llu\n", tot);
2171}
2172
2173static int i915_guc_info(struct seq_file *m, void *data)
2174{
David Weinehall36cdd012016-08-22 13:59:31 +03002175 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilson334636c2016-11-29 12:10:20 +00002176 const struct intel_guc *guc = &dev_priv->guc;
Dave Gordon8b417c22015-08-12 15:43:44 +01002177
Michał Winiarskidb557992018-03-19 10:53:43 +01002178 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002179 return -ENODEV;
2180
Michał Winiarskidb557992018-03-19 10:53:43 +01002181 i915_guc_log_info(m, dev_priv);
2182
2183 if (!USES_GUC_SUBMISSION(dev_priv))
2184 return 0;
2185
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002186 GEM_BUG_ON(!guc->execbuf_client);
Dave Gordon8b417c22015-08-12 15:43:44 +01002187
Michał Winiarskidb557992018-03-19 10:53:43 +01002188 seq_printf(m, "\nDoorbell map:\n");
Joonas Lahtinenabddffd2017-03-22 10:39:44 -07002189 seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
Michał Winiarskidb557992018-03-19 10:53:43 +01002190 seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
Dave Gordon9636f6d2016-06-13 17:57:28 +01002191
Chris Wilson334636c2016-11-29 12:10:20 +00002192 seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2193 i915_guc_client_info(m, dev_priv, guc->execbuf_client);
Chris Wilsone78c9172018-02-07 21:05:42 +00002194 if (guc->preempt_client) {
2195 seq_printf(m, "\nGuC preempt client @ %p:\n",
2196 guc->preempt_client);
2197 i915_guc_client_info(m, dev_priv, guc->preempt_client);
2198 }
Dave Gordon8b417c22015-08-12 15:43:44 +01002199
2200 /* Add more as required ... */
2201
2202 return 0;
2203}
2204
/*
 * debugfs entry: walk the GuC stage-descriptor pool and, for every
 * active descriptor, print its ids, priority, doorbell parameters,
 * workqueue layout, and the per-engine execlist (LRC) context fields
 * for each engine in the execbuf client's engine mask.
 */
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;
	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
	struct intel_guc_client *client = guc->execbuf_client;
	intel_engine_mask_t tmp;
	int index;

	if (!USES_GUC_SUBMISSION(dev_priv))
		return -ENODEV;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		/* Skip slots not currently in use */
		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		/* Dump the execlist context for each engine the client uses */
		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
				&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}
2258
/*
 * debugfs entry: hex-dump the GuC log buffer, four u32 words per line.
 * The same handler serves two debugfs files: when info_ent->data is
 * set, it dumps the load-error log instead of the runtime log.
 */
static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	bool dump_load_err = !!node->info_ent->data;
	struct drm_i915_gem_object *obj = NULL;
	u32 *log;
	int i = 0;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	if (dump_load_err)
		obj = dev_priv->guc.load_err_log;
	else if (dev_priv->guc.log.vma)
		obj = dev_priv->guc.log.vma->obj;

	/* No log captured/allocated: nothing to show, not an error */
	if (!obj)
		return 0;

	/* Map the backing pages (write-combined) for CPU access */
	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(log)) {
		DRM_DEBUG("Failed to pin object\n");
		seq_puts(m, "(log data unaccessible)\n");
		return PTR_ERR(log);
	}

	/* NOTE(review): reads words i..i+3 each iteration, so this
	 * assumes obj->base.size is a multiple of 16 bytes — TODO confirm
	 * against the GuC log allocation sizes. */
	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			   *(log + i), *(log + i + 1),
			   *(log + i + 2), *(log + i + 3));

	seq_putc(m, '\n');

	i915_gem_object_unpin_map(obj);

	return 0;
}
2297
Michał Winiarski4977a282018-03-19 10:53:40 +01002298static int i915_guc_log_level_get(void *data, u64 *val)
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302299{
Chris Wilsonbcc36d82017-04-07 20:42:20 +01002300 struct drm_i915_private *dev_priv = data;
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302301
Michał Winiarski86aa8242018-03-08 16:46:53 +01002302 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002303 return -ENODEV;
2304
Piotr Piórkowski50935ac2018-06-04 16:19:41 +02002305 *val = intel_guc_log_get_level(&dev_priv->guc.log);
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302306
2307 return 0;
2308}
2309
Michał Winiarski4977a282018-03-19 10:53:40 +01002310static int i915_guc_log_level_set(void *data, u64 val)
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302311{
Chris Wilsonbcc36d82017-04-07 20:42:20 +01002312 struct drm_i915_private *dev_priv = data;
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302313
Michał Winiarski86aa8242018-03-08 16:46:53 +01002314 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002315 return -ENODEV;
2316
Piotr Piórkowski50935ac2018-06-04 16:19:41 +02002317 return intel_guc_log_set_level(&dev_priv->guc.log, val);
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302318}
2319
/* Expose the GuC log level as a read/write integer debugfs attribute. */
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
			i915_guc_log_level_get, i915_guc_log_level_set,
			"%lld\n");
2323
Michał Winiarski4977a282018-03-19 10:53:40 +01002324static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2325{
2326 struct drm_i915_private *dev_priv = inode->i_private;
2327
2328 if (!USES_GUC(dev_priv))
2329 return -ENODEV;
2330
2331 file->private_data = &dev_priv->guc.log;
2332
2333 return intel_guc_log_relay_open(&dev_priv->guc.log);
2334}
2335
2336static ssize_t
2337i915_guc_log_relay_write(struct file *filp,
2338 const char __user *ubuf,
2339 size_t cnt,
2340 loff_t *ppos)
2341{
2342 struct intel_guc_log *log = filp->private_data;
2343
2344 intel_guc_log_relay_flush(log);
2345
2346 return cnt;
2347}
2348
2349static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2350{
2351 struct drm_i915_private *dev_priv = inode->i_private;
2352
2353 intel_guc_log_relay_close(&dev_priv->guc.log);
2354
2355 return 0;
2356}
2357
/* File operations for the GuC log relay debugfs entry (open/write/release). */
static const struct file_operations i915_guc_log_relay_fops = {
	.owner = THIS_MODULE,
	.open = i915_guc_log_relay_open,
	.write = i915_guc_log_relay_write,
	.release = i915_guc_log_relay_release,
};
2364
/*
 * debugfs entry (per-connector): read the sink's DP_PSR_STATUS DPCD
 * register over AUX and decode its state field into a readable string.
 */
static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
	u8 val;
	/* Indexed by DP_PSR_SINK_STATE_MASK value from DP_PSR_STATUS */
	static const char * const sink_status[] = {
		"inactive",
		"transition to active, capture and display",
		"active, display from RFB",
		"active, capture and display on sink device timings",
		"transition to inactive, capture and display, timing re-sync",
		"reserved",
		"reserved",
		"sink internal error",
	};
	struct drm_connector *connector = m->private;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	int ret;

	if (!CAN_PSR(dev_priv)) {
		seq_puts(m, "PSR Unsupported\n");
		return -ENODEV;
	}

	/* AUX reads need a connected sink */
	if (connector->status != connector_status_connected)
		return -ENODEV;

	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);

	/* drm_dp_dpcd_readb succeeded iff it transferred the 1 byte */
	if (ret == 1) {
		const char *str = "unknown";

		val &= DP_PSR_SINK_STATE_MASK;
		if (val < ARRAY_SIZE(sink_status))
			str = sink_status[val];
		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
	} else {
		/* NOTE(review): a short read (ret == 0) propagates here as
		 * a 0 "success" return with no output — presumably the AUX
		 * helper only returns 1 or a negative errno; confirm. */
		return ret;
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2408
/*
 * Print the source-side PSR hardware state: reads the PSR2 or PSR1
 * status register (depending on which mode is enabled) and decodes its
 * state field via the matching name table; undecodable values print as
 * "unknown".
 */
static void
psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
{
	u32 val, status_val;
	const char *status = "unknown";

	if (dev_priv->psr.psr2_enabled) {
		/* Indexed by EDP_PSR2_STATUS_STATE field value */
		static const char * const live_status[] = {
			"IDLE",
			"CAPTURE",
			"CAPTURE_FS",
			"SLEEP",
			"BUFON_FW",
			"ML_UP",
			"SU_STANDBY",
			"FAST_SLEEP",
			"DEEP_SLEEP",
			"BUF_ON",
			"TG_ON"
		};
		val = I915_READ(EDP_PSR2_STATUS);
		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
			      EDP_PSR2_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	} else {
		/* Indexed by EDP_PSR_STATUS_STATE field value */
		static const char * const live_status[] = {
			"IDLE",
			"SRDONACK",
			"SRDENT",
			"BUFOFF",
			"BUFON",
			"AUXACK",
			"SRDOFFACK",
			"SRDENT_ON",
		};
		val = I915_READ(EDP_PSR_STATUS);
		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
			      EDP_PSR_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	}

	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
}
2454
/*
 * debugfs: report eDP PSR (Panel Self Refresh) state.
 *
 * Prints sink support (with the raw DPCD PSR capability byte when a DP
 * sink is bound), the active PSR mode (PSR1/PSR2), the raw source
 * control register, busy frontbuffer bits and, where applicable, the
 * hardware performance counter, IRQ debug timestamps and the PSR2
 * selective-update block counts.
 */
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_psr *psr = &dev_priv->psr;
	intel_wakeref_t wakeref;
	const char *status;
	bool enabled;
	u32 val;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
	if (psr->dp)
		seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
	seq_puts(m, "\n");

	/* Nothing more to report without a PSR-capable sink. */
	if (!psr->sink_support)
		return 0;

	/* Keep the device awake while reading PSR registers below. */
	wakeref = intel_runtime_pm_get(dev_priv);
	mutex_lock(&psr->lock);

	if (psr->enabled)
		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
	else
		status = "disabled";
	seq_printf(m, "PSR mode: %s\n", status);

	if (!psr->enabled)
		goto unlock;

	/* Pick the control register matching the active PSR version. */
	if (psr->psr2_enabled) {
		val = I915_READ(EDP_PSR2_CTL);
		enabled = val & EDP_PSR2_ENABLE;
	} else {
		val = I915_READ(EDP_PSR_CTL);
		enabled = val & EDP_PSR_ENABLE;
	}
	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
		   enableddisabled(enabled), val);
	psr_source_status(dev_priv, m);
	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
		   psr->busy_frontbuffer_bits);

	/*
	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
		seq_printf(m, "Performance counter: %u\n", val);
	}

	/* Entry/exit timestamps are only tracked with IRQ debug enabled. */
	if (psr->debug & I915_PSR_DEBUG_IRQ) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   psr->last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
	}

	if (psr->psr2_enabled) {
		u32 su_frames_val[3];
		int frame;

		/*
		 * Reading all 3 registers before hand to minimize crossing a
		 * frame boundary between register reads
		 */
		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
			su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));

		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");

		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
			u32 su_blocks;

			su_blocks = su_frames_val[frame / 3] &
				    PSR2_SU_STATUS_MASK(frame);
			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
			seq_printf(m, "%d\t%d\n", frame, su_blocks);
		}
	}

unlock:
	mutex_unlock(&psr->lock);
	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
2543
/*
 * debugfs write handler: update the PSR debug control value.
 *
 * Holds a runtime PM wakeref across intel_psr_debug_set() and returns
 * its result; -ENODEV when the device cannot do PSR at all.
 */
static int
i915_edp_psr_debug_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;
	int ret;

	if (!CAN_PSR(dev_priv))
		return -ENODEV;

	DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);

	wakeref = intel_runtime_pm_get(dev_priv);

	ret = intel_psr_debug_set(dev_priv, val);

	intel_runtime_pm_put(dev_priv, wakeref);

	return ret;
}
2564
2565static int
2566i915_edp_psr_debug_get(void *data, u64 *val)
2567{
2568 struct drm_i915_private *dev_priv = data;
2569
2570 if (!CAN_PSR(dev_priv))
2571 return -ENODEV;
2572
2573 *val = READ_ONCE(dev_priv->psr.debug);
2574 return 0;
2575}
2576
/* u64 debugfs attribute wiring up the PSR debug get/set handlers above. */
DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");
2580
/*
 * debugfs: report the energy consumed, in microjoules.
 *
 * The energy status unit is taken from bits 12:8 of MSR_RAPL_POWER_UNIT
 * (mask 0x1f00 >> 8); the raw MCH_SECP_NRG_STTS counter is then
 * converted as uJ = (counter * 10^6) >> units.
 */
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	unsigned long long power;
	intel_wakeref_t wakeref;
	u32 units;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	/* rdmsrl_safe() fails when the RAPL MSR is not available. */
	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
		return -ENODEV;

	units = (power & 0x1f00) >> 8;
	with_intel_runtime_pm(dev_priv, wakeref)
		power = I915_READ(MCH_SECP_NRG_STTS);

	power = (1000000 * power) >> units; /* convert to uJ */
	seq_printf(m, "%llu", power);

	return 0;
}
2603
/*
 * debugfs: summarize runtime power management state: wakeref status,
 * GPU idleness, IRQ state, the PM core usage count (CONFIG_PM only)
 * and the PCI power state.
 */
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/* Not fatal: the remaining state is still printed below. */
	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "Runtime power status: %s\n",
		   enableddisabled(!dev_priv->power_domains.wakeref));

	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	/* With wakeref tracking built in, dump the outstanding wakerefs. */
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
		struct drm_printer p = drm_seq_file_printer(m);

		print_intel_runtime_pm_wakeref(dev_priv, &p);
	}

	return 0;
}
2636
/*
 * debugfs: list every power well with its use count, plus the
 * per-domain use counts of each display power domain the well serves.
 * The power_domains lock is held so counts are read consistently.
 */
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", power_well->desc->name,
			   power_well->count);

		for_each_power_domain(power_domain, power_well->desc->domains)
			seq_printf(m, "  %-23s %d\n",
				   intel_display_power_domain_str(power_domain),
				   power_domains->domain_use_count[power_domain]);
	}

	mutex_unlock(&power_domains->lock);

	return 0;
}
2664
/*
 * debugfs: report DMC (CSR) firmware state: whether a payload is
 * loaded, its path and version, and the DC-state transition counters.
 */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv))
		return -ENODEV;

	csr = &dev_priv->csr;

	/* Keep the device awake for the register reads below. */
	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	/* Without a payload only the base registers are dumped. */
	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	/* The DC counter registers below are only known up to gen11. */
	if (WARN_ON(INTEL_GEN(dev_priv) > 11))
		goto out;

	seq_printf(m, "DC3 -> DC5 count: %d\n",
		   I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
						    SKL_CSR_DC3_DC5_COUNT));
	if (!IS_GEN9_LP(dev_priv))
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
2706
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002707static void intel_seq_print_mode(struct seq_file *m, int tabs,
2708 struct drm_display_mode *mode)
2709{
2710 int i;
2711
2712 for (i = 0; i < tabs; i++)
2713 seq_putc(m, '\t');
2714
Shayenne Moura4fb6bb82018-12-20 10:27:57 -02002715 seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002716}
2717
/*
 * Print one encoder attached to @intel_crtc, followed by every
 * connector on that encoder; connected connectors also get the crtc's
 * current mode printed.
 */
static void intel_encoder_info(struct seq_file *m,
			       struct intel_crtc *intel_crtc,
			       struct intel_encoder *intel_encoder)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_connector *intel_connector;
	struct drm_encoder *encoder;

	encoder = &intel_encoder->base;
	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
		   encoder->base.id, encoder->name);
	for_each_connector_on_encoder(dev, encoder, intel_connector) {
		struct drm_connector *connector = &intel_connector->base;
		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
			   connector->base.id,
			   connector->name,
			   drm_get_connector_status_name(connector->status));
		if (connector->status == connector_status_connected) {
			struct drm_display_mode *mode = &crtc->mode;
			seq_printf(m, ", mode:\n");
			intel_seq_print_mode(m, 2, mode);
		} else {
			seq_putc(m, '\n');
		}
	}
}
2746
/*
 * Print the primary plane's framebuffer (id, source position, size)
 * for @intel_crtc, then each encoder hanging off the crtc.
 */
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_encoder *intel_encoder;
	struct drm_plane_state *plane_state = crtc->primary->state;
	struct drm_framebuffer *fb = plane_state->fb;

	if (fb)
		/* src_x/src_y are 16.16 fixed point; print integer part. */
		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
			   fb->base.id, plane_state->src_x >> 16,
			   plane_state->src_y >> 16, fb->width, fb->height);
	else
		seq_puts(m, "\tprimary plane disabled\n");
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		intel_encoder_info(m, intel_crtc, intel_encoder);
}
2765
2766static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2767{
2768 struct drm_display_mode *mode = panel->fixed_mode;
2769
2770 seq_printf(m, "\tfixed mode:\n");
2771 intel_seq_print_mode(m, 2, mode);
2772}
2773
/*
 * Print DP connector details: DPCD revision, audio support, the fixed
 * panel mode for eDP, and the downstream-port debug dump.
 */
static void intel_dp_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_info(m, &intel_connector->panel);

	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
				&intel_dp->aux);
}
2788
/* Print DP MST connector details: audio support on this MST port. */
static void intel_dp_mst_info(struct seq_file *m,
			      struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp_mst_encoder *intel_mst =
		enc_to_mst(&intel_encoder->base);
	struct intel_digital_port *intel_dig_port = intel_mst->primary;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	/* Audio capability is queried per-port from the MST topology. */
	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
						   intel_connector->port);

	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
}
2802
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002803static void intel_hdmi_info(struct seq_file *m,
2804 struct intel_connector *intel_connector)
2805{
2806 struct intel_encoder *intel_encoder = intel_connector->encoder;
2807 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2808
Jani Nikula742f4912015-09-03 11:16:09 +03002809 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002810}
2811
/* Print LVDS connector details: just the panel's fixed mode. */
static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
2817
/*
 * Print one connector: id/type/status and — when connected — physical
 * dimensions, subpixel order, CEA revision, encoder-type-specific
 * details and the list of probed modes.
 */
static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));

	if (connector->status == connector_status_disconnected)
		return;

	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
		   connector->display_info.width_mm,
		   connector->display_info.height_mm);
	seq_printf(m, "\tsubpixel order: %s\n",
		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);

	/* No encoder bound: nothing type-specific to report. */
	if (!intel_encoder)
		return;

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
			intel_dp_mst_info(m, intel_connector);
		else
			intel_dp_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		/* HDMI may be driven by a dedicated HDMI or a DDI encoder. */
		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
		    intel_encoder->type == INTEL_OUTPUT_DDI)
			intel_hdmi_info(m, intel_connector);
		break;
	default:
		break;
	}

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}
2867
Robert Fekete3abc4e02015-10-27 16:58:32 +01002868static const char *plane_type(enum drm_plane_type type)
2869{
2870 switch (type) {
2871 case DRM_PLANE_TYPE_OVERLAY:
2872 return "OVL";
2873 case DRM_PLANE_TYPE_PRIMARY:
2874 return "PRI";
2875 case DRM_PLANE_TYPE_CURSOR:
2876 return "CUR";
2877 /*
2878 * Deliberately omitting default: to generate compiler warnings
2879 * when a new drm_plane_type gets added.
2880 */
2881 }
2882
2883 return "unknown";
2884}
2885
/*
 * Format a plane rotation/reflection bitmask into @buf, listing each
 * set DRM_MODE_ROTATE_/REFLECT_ bit followed by the raw hex value.
 */
static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
{
	/*
	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused
	 */
	snprintf(buf, bufsize,
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
		 rotation);
}
2902
/*
 * Print every plane on @intel_crtc: type, crtc position/size, source
 * position/size (16.16 fixed point), pixel format and rotation.
 */
static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_plane *intel_plane;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane_state *state;
		struct drm_plane *plane = &intel_plane->base;
		struct drm_format_name_buf format_name;
		char rot_str[48];

		if (!plane->state) {
			seq_puts(m, "plane->state is NULL!\n");
			continue;
		}

		state = plane->state;

		if (state->fb) {
			drm_get_format_name(state->fb->format->format,
					    &format_name);
		} else {
			sprintf(format_name.str, "N/A");
		}

		plane_rotation(rot_str, sizeof(rot_str), state->rotation);

		/*
		 * src_* are 16.16 fixed point; the fractional 16 bits are
		 * scaled by 15625/1024 (== 10^6/2^16) for decimal display.
		 */
		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
			   plane->base.id,
			   plane_type(intel_plane->base.type),
			   state->crtc_x, state->crtc_y,
			   state->crtc_w, state->crtc_h,
			   (state->src_x >> 16),
			   ((state->src_x & 0xffff) * 15625) >> 10,
			   (state->src_y >> 16),
			   ((state->src_y & 0xffff) * 15625) >> 10,
			   (state->src_w >> 16),
			   ((state->src_w & 0xffff) * 15625) >> 10,
			   (state->src_h >> 16),
			   ((state->src_h & 0xffff) * 15625) >> 10,
			   format_name.str,
			   rot_str);
	}
}
2948
/*
 * Print the scaler state for @intel_crtc: the users bitmask, assigned
 * scaler id, and each scaler's in_use flag and mode.
 */
static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct intel_crtc_state *pipe_config;
	int num_scalers = intel_crtc->num_scalers;
	int i;

	pipe_config = to_intel_crtc_state(intel_crtc->base.state);

	/* Not all platforms have a scaler */
	if (num_scalers) {
		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
			   num_scalers,
			   pipe_config->scaler_state.scaler_users,
			   pipe_config->scaler_state.scaler_id);

		for (i = 0; i < num_scalers; i++) {
			struct intel_scaler *sc =
				&pipe_config->scaler_state.scalers[i];

			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
				   i, yesno(sc->in_use), sc->mode);
		}
		seq_puts(m, "\n");
	} else {
		seq_puts(m, "\tNo scalers available on this platform\n");
	}
}
2976
/*
 * debugfs: dump display state — each CRTC (with cursor, scaler and
 * plane details when active, plus FIFO underrun reporting state),
 * followed by every connector.
 */
static int i915_display_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "CRTC info\n");
	seq_printf(m, "---------\n");
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *pipe_config;

		/* Lock each crtc so its state is stable while printing. */
		drm_modeset_lock(&crtc->base.mutex, NULL);
		pipe_config = to_intel_crtc_state(crtc->base.state);

		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
			   crtc->base.base.id, pipe_name(crtc->pipe),
			   yesno(pipe_config->base.active),
			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
			   yesno(pipe_config->dither), pipe_config->pipe_bpp);

		if (pipe_config->base.active) {
			struct intel_plane *cursor =
				to_intel_plane(crtc->base.cursor);

			intel_crtc_info(m, crtc);

			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
				   yesno(cursor->base.state->visible),
				   cursor->base.state->crtc_x,
				   cursor->base.state->crtc_y,
				   cursor->base.state->crtc_w,
				   cursor->base.state->crtc_h,
				   cursor->cursor.base);
			intel_scaler_info(m, crtc);
			intel_plane_info(m, crtc);
		}

		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
			   yesno(!crtc->cpu_fifo_underrun_disabled),
			   yesno(!crtc->pch_fifo_underrun_disabled));
		drm_modeset_unlock(&crtc->base.mutex);
	}

	seq_printf(m, "\n");
	seq_printf(m, "Connector info\n");
	seq_printf(m, "--------------\n");
	mutex_lock(&dev->mode_config.mutex);
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter)
		intel_connector_info(m, connector);
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
3039
/*
 * debugfs: dump per-engine state. Prints GT awake status (with the GT
 * wakeref count) and the CS timestamp frequency, then runs
 * intel_engine_dump() for each engine, all under a runtime PM wakeref.
 */
static int i915_engine_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	enum intel_engine_id id;
	struct drm_printer p;

	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "GT awake? %s [%d]\n",
		   yesno(dev_priv->gt.awake),
		   atomic_read(&dev_priv->gt.wakeref.count));
	seq_printf(m, "CS timestamp frequency: %u kHz\n",
		   RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);

	p = drm_seq_file_printer(m);
	for_each_engine(engine, dev_priv, id)
		intel_engine_dump(engine, &p, "%s\n", engine->name);

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
3064
Lionel Landwerlin79e9cd52018-03-06 12:28:54 +00003065static int i915_rcs_topology(struct seq_file *m, void *unused)
3066{
3067 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3068 struct drm_printer p = drm_seq_file_printer(m);
3069
Jani Nikula02584042018-12-31 16:56:41 +02003070 intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
Lionel Landwerlin79e9cd52018-03-06 12:28:54 +00003071
3072 return 0;
3073}
3074
Chris Wilsonc5418a82017-10-13 21:26:19 +01003075static int i915_shrinker_info(struct seq_file *m, void *unused)
3076{
3077 struct drm_i915_private *i915 = node_to_i915(m->private);
3078
3079 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3080 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3081
3082 return 0;
3083}
3084
/*
 * debugfs: dump each shared DPLL — name/id, crtc/active masks, on
 * state, and the full tracked hardware state (dpll, fp, wrpll, cfgcr
 * and the MG PLL register set). Holds all modeset locks so the state
 * cannot change while printing.
 */
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i;

	drm_modeset_lock_all(dev);
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
			   pll->info->id);
		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
		seq_printf(m, " tracked hardware state:\n");
		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
		seq_printf(m, " dpll_md: 0x%08x\n",
			   pll->state.hw_state.dpll_md);
		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
			   pll->state.hw_state.mg_refclkin_ctl);
		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
			   pll->state.hw_state.mg_clktop2_coreclkctl1);
		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
			   pll->state.hw_state.mg_clktop2_hsclkctl);
		seq_printf(m, " mg_pll_div0:  0x%08x\n",
			   pll->state.hw_state.mg_pll_div0);
		seq_printf(m, " mg_pll_div1:  0x%08x\n",
			   pll->state.hw_state.mg_pll_div1);
		seq_printf(m, " mg_pll_lf:    0x%08x\n",
			   pll->state.hw_state.mg_pll_lf);
		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
			   pll->state.hw_state.mg_pll_frac_lock);
		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
			   pll->state.hw_state.mg_pll_ssc);
		seq_printf(m, " mg_pll_bias:  0x%08x\n",
			   pll->state.hw_state.mg_pll_bias);
		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
	}
	drm_modeset_unlock_all(dev);

	return 0;
}
3133
/*
 * debugfs: list the context workarounds applied to the render engine:
 * register offset, value and mask for each entry in its ctx_wa_list.
 */
static int i915_wa_registers(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	const struct i915_wa_list *wal = &i915->engine[RCS0]->ctx_wa_list;
	struct i915_wa *wa;
	unsigned int i;

	seq_printf(m, "Workarounds applied: %u\n", wal->count);
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
			   i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);

	return 0;
}
3148
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05303149static int i915_ipc_status_show(struct seq_file *m, void *data)
3150{
3151 struct drm_i915_private *dev_priv = m->private;
3152
3153 seq_printf(m, "Isochronous Priority Control: %s\n",
3154 yesno(dev_priv->ipc_enabled));
3155 return 0;
3156}
3157
3158static int i915_ipc_status_open(struct inode *inode, struct file *file)
3159{
3160 struct drm_i915_private *dev_priv = inode->i_private;
3161
3162 if (!HAS_IPC(dev_priv))
3163 return -ENODEV;
3164
3165 return single_open(file, i915_ipc_status_show, dev_priv);
3166}
3167
/*
 * Write handler for the i915_ipc_status debugfs file: parse a boolean from
 * userspace and force Isochronous Priority Control on or off accordingly.
 * Returns the number of bytes consumed, or a negative errno on parse failure.
 */
static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	intel_wakeref_t wakeref;
	bool enable;
	int ret;

	ret = kstrtobool_from_user(ubuf, len, &enable);
	if (ret < 0)
		return ret;

	/* Hold a runtime-pm wakeref while the hardware state is updated. */
	with_intel_runtime_pm(dev_priv, wakeref) {
		if (!dev_priv->ipc_enabled && enable)
			DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
		/*
		 * Toggling IPC invalidates the BIOS-provided watermarks;
		 * mark them untrusted so they are recomputed.
		 */
		dev_priv->wm.distrust_bios_wm = true;
		dev_priv->ipc_enabled = enable;
		intel_enable_ipc(dev_priv);
	}

	return len;
}
3191
/* File operations for the read/write i915_ipc_status debugfs entry. */
static const struct file_operations i915_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = i915_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_ipc_status_write
};
3200
Damien Lespiauc5511e42014-11-04 17:06:51 +00003201static int i915_ddb_info(struct seq_file *m, void *unused)
3202{
David Weinehall36cdd012016-08-22 13:59:31 +03003203 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3204 struct drm_device *dev = &dev_priv->drm;
Damien Lespiauc5511e42014-11-04 17:06:51 +00003205 struct skl_ddb_entry *entry;
Ville Syrjäläff43bc32018-11-27 18:59:00 +02003206 struct intel_crtc *crtc;
Damien Lespiauc5511e42014-11-04 17:06:51 +00003207
David Weinehall36cdd012016-08-22 13:59:31 +03003208 if (INTEL_GEN(dev_priv) < 9)
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00003209 return -ENODEV;
Damien Lespiau2fcffe12014-12-03 17:33:24 +00003210
Damien Lespiauc5511e42014-11-04 17:06:51 +00003211 drm_modeset_lock_all(dev);
3212
Damien Lespiauc5511e42014-11-04 17:06:51 +00003213 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3214
Ville Syrjäläff43bc32018-11-27 18:59:00 +02003215 for_each_intel_crtc(&dev_priv->drm, crtc) {
3216 struct intel_crtc_state *crtc_state =
3217 to_intel_crtc_state(crtc->base.state);
3218 enum pipe pipe = crtc->pipe;
3219 enum plane_id plane_id;
3220
Damien Lespiauc5511e42014-11-04 17:06:51 +00003221 seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3222
Ville Syrjäläff43bc32018-11-27 18:59:00 +02003223 for_each_plane_id_on_crtc(crtc, plane_id) {
3224 entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
3225 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
Damien Lespiauc5511e42014-11-04 17:06:51 +00003226 entry->start, entry->end,
3227 skl_ddb_entry_size(entry));
3228 }
3229
Ville Syrjäläff43bc32018-11-27 18:59:00 +02003230 entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
Damien Lespiauc5511e42014-11-04 17:06:51 +00003231 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
3232 entry->end, skl_ddb_entry_size(entry));
3233 }
3234
3235 drm_modeset_unlock_all(dev);
3236
3237 return 0;
3238}
3239
/*
 * Print the DRRS (Dynamic Refresh Rate Switching) state for one CRTC:
 * the connector(s) currently driven by it, the DRRS type advertised by
 * the VBT and, when DRRS is in use, the live refresh-rate state sampled
 * under drrs->mutex.
 */
static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Name every connector attached to this CRTC. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);
	}
	drm_connector_list_iter_end(&conn_iter);

	/* What the VBT claims about DRRS support. */
	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled\n");
			if (dev_priv->psr.enabled)
				seq_puts(m,
				"\tAs PSR is enabled, DRRS is not enabled\n");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
					drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		/* Pick the vrefresh that matches the current DRRS state. */
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			/* Unexpected state: report it and bail out early. */
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
						drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter*/
		seq_puts(m, "\tDRRS Supported : No");
	}
	seq_puts(m, "\n");
}
3314
3315static int i915_drrs_status(struct seq_file *m, void *unused)
3316{
David Weinehall36cdd012016-08-22 13:59:31 +03003317 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3318 struct drm_device *dev = &dev_priv->drm;
Vandana Kannana54746e2015-03-03 20:53:10 +05303319 struct intel_crtc *intel_crtc;
3320 int active_crtc_cnt = 0;
3321
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003322 drm_modeset_lock_all(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303323 for_each_intel_crtc(dev, intel_crtc) {
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003324 if (intel_crtc->base.state->active) {
Vandana Kannana54746e2015-03-03 20:53:10 +05303325 active_crtc_cnt++;
3326 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
3327
3328 drrs_status_per_crtc(m, dev, intel_crtc);
3329 }
Vandana Kannana54746e2015-03-03 20:53:10 +05303330 }
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003331 drm_modeset_unlock_all(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303332
3333 if (!active_crtc_cnt)
3334 seq_puts(m, "No active crtc found\n");
3335
3336 return 0;
3337}
3338
Dave Airlie11bed952014-05-12 15:22:27 +10003339static int i915_dp_mst_info(struct seq_file *m, void *unused)
3340{
David Weinehall36cdd012016-08-22 13:59:31 +03003341 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3342 struct drm_device *dev = &dev_priv->drm;
Dave Airlie11bed952014-05-12 15:22:27 +10003343 struct intel_encoder *intel_encoder;
3344 struct intel_digital_port *intel_dig_port;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003345 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003346 struct drm_connector_list_iter conn_iter;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003347
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003348 drm_connector_list_iter_begin(dev, &conn_iter);
3349 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003350 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
Dave Airlie11bed952014-05-12 15:22:27 +10003351 continue;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003352
3353 intel_encoder = intel_attached_encoder(connector);
3354 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3355 continue;
3356
3357 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
Dave Airlie11bed952014-05-12 15:22:27 +10003358 if (!intel_dig_port->dp.can_mst)
3359 continue;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003360
Jim Bride40ae80c2016-04-14 10:18:37 -07003361 seq_printf(m, "MST Source Port %c\n",
Ville Syrjälä8f4f2792017-11-09 17:24:34 +02003362 port_name(intel_dig_port->base.port));
Dave Airlie11bed952014-05-12 15:22:27 +10003363 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3364 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003365 drm_connector_list_iter_end(&conn_iter);
3366
Dave Airlie11bed952014-05-12 15:22:27 +10003367 return 0;
3368}
3369
/*
 * Write handler for i915_displayport_test_active: parse an integer from
 * userspace and arm (value 1) or disarm DP compliance testing on every
 * connected DisplayPort connector.
 */
static ssize_t i915_displayport_test_active_write(struct file *file,
						  const char __user *ubuf,
						  size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	if (len == 0)
		return 0;

	/* Copy the user buffer with a NUL appended for kstrtoint(). */
	input_buffer = memdup_user_nul(ubuf, len);
	if (IS_ERR(input_buffer))
		return PTR_ERR(input_buffer);

	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		/* Only DisplayPort connectors take part in compliance testing. */
		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				break;
			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance.test_active = 1;
			else
				intel_dp->compliance.test_active = 0;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	kfree(input_buffer);
	/* Propagate a kstrtoint() parse failure from inside the loop. */
	if (status < 0)
		return status;

	*offp += len;
	return len;
}
3428
3429static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3430{
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003431 struct drm_i915_private *dev_priv = m->private;
3432 struct drm_device *dev = &dev_priv->drm;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003433 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003434 struct drm_connector_list_iter conn_iter;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003435 struct intel_dp *intel_dp;
3436
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003437 drm_connector_list_iter_begin(dev, &conn_iter);
3438 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003439 struct intel_encoder *encoder;
3440
Todd Previteeb3394fa2015-04-18 00:04:19 -07003441 if (connector->connector_type !=
3442 DRM_MODE_CONNECTOR_DisplayPort)
3443 continue;
3444
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003445 encoder = to_intel_encoder(connector->encoder);
3446 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3447 continue;
3448
3449 if (encoder && connector->status == connector_status_connected) {
3450 intel_dp = enc_to_intel_dp(&encoder->base);
Manasi Navarec1617ab2016-12-09 16:22:50 -08003451 if (intel_dp->compliance.test_active)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003452 seq_puts(m, "1");
3453 else
3454 seq_puts(m, "0");
3455 } else
3456 seq_puts(m, "0");
3457 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003458 drm_connector_list_iter_end(&conn_iter);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003459
3460 return 0;
3461}
3462
/* Open i915_displayport_test_active with its seq_file show callback. */
static int i915_displayport_test_active_open(struct inode *inode,
					     struct file *file)
{
	return single_open(file, i915_displayport_test_active_show,
			   inode->i_private);
}

/* Read/write debugfs entry toggling DP compliance test activation. */
static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};
3478
/*
 * Show the stashed DP compliance test data for each connected DP
 * connector: the raw EDID test value for EDID-read tests, or the video
 * mode parameters for video-pattern tests.
 */
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		/* Skip everything that is not a DisplayPort connector. */
		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* Skip MST encoders. */
		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			if (intel_dp->compliance.test_type ==
			    DP_TEST_LINK_EDID_READ)
				seq_printf(m, "%lx",
					   intel_dp->compliance.test_data.edid);
			else if (intel_dp->compliance.test_type ==
				 DP_TEST_LINK_VIDEO_PATTERN) {
				seq_printf(m, "hdisplay: %d\n",
					   intel_dp->compliance.test_data.hdisplay);
				seq_printf(m, "vdisplay: %d\n",
					   intel_dp->compliance.test_data.vdisplay);
				seq_printf(m, "bpc: %u\n",
					   intel_dp->compliance.test_data.bpc);
			}
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003522
3523static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3524{
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003525 struct drm_i915_private *dev_priv = m->private;
3526 struct drm_device *dev = &dev_priv->drm;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003527 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003528 struct drm_connector_list_iter conn_iter;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003529 struct intel_dp *intel_dp;
3530
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003531 drm_connector_list_iter_begin(dev, &conn_iter);
3532 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003533 struct intel_encoder *encoder;
3534
Todd Previteeb3394fa2015-04-18 00:04:19 -07003535 if (connector->connector_type !=
3536 DRM_MODE_CONNECTOR_DisplayPort)
3537 continue;
3538
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003539 encoder = to_intel_encoder(connector->encoder);
3540 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3541 continue;
3542
3543 if (encoder && connector->status == connector_status_connected) {
3544 intel_dp = enc_to_intel_dp(&encoder->base);
Manasi Navarec1617ab2016-12-09 16:22:50 -08003545 seq_printf(m, "%02lx", intel_dp->compliance.test_type);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003546 } else
3547 seq_puts(m, "0");
3548 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003549 drm_connector_list_iter_end(&conn_iter);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003550
3551 return 0;
3552}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003553DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003554
Jani Nikulae5315212019-01-16 11:15:23 +02003555static void wm_latency_show(struct seq_file *m, const u16 wm[8])
Ville Syrjälä369a1342014-01-22 14:36:08 +02003556{
David Weinehall36cdd012016-08-22 13:59:31 +03003557 struct drm_i915_private *dev_priv = m->private;
3558 struct drm_device *dev = &dev_priv->drm;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003559 int level;
Ville Syrjäläde38b952015-06-24 22:00:09 +03003560 int num_levels;
3561
David Weinehall36cdd012016-08-22 13:59:31 +03003562 if (IS_CHERRYVIEW(dev_priv))
Ville Syrjäläde38b952015-06-24 22:00:09 +03003563 num_levels = 3;
David Weinehall36cdd012016-08-22 13:59:31 +03003564 else if (IS_VALLEYVIEW(dev_priv))
Ville Syrjäläde38b952015-06-24 22:00:09 +03003565 num_levels = 1;
Ville Syrjälä04548cb2017-04-21 21:14:29 +03003566 else if (IS_G4X(dev_priv))
3567 num_levels = 3;
Ville Syrjäläde38b952015-06-24 22:00:09 +03003568 else
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01003569 num_levels = ilk_wm_max_level(dev_priv) + 1;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003570
3571 drm_modeset_lock_all(dev);
3572
3573 for (level = 0; level < num_levels; level++) {
3574 unsigned int latency = wm[level];
3575
Damien Lespiau97e94b22014-11-04 17:06:50 +00003576 /*
3577 * - WM1+ latency values in 0.5us units
Ville Syrjäläde38b952015-06-24 22:00:09 +03003578 * - latencies are in us on gen9/vlv/chv
Damien Lespiau97e94b22014-11-04 17:06:50 +00003579 */
Ville Syrjälä04548cb2017-04-21 21:14:29 +03003580 if (INTEL_GEN(dev_priv) >= 9 ||
3581 IS_VALLEYVIEW(dev_priv) ||
3582 IS_CHERRYVIEW(dev_priv) ||
3583 IS_G4X(dev_priv))
Damien Lespiau97e94b22014-11-04 17:06:50 +00003584 latency *= 10;
3585 else if (level > 0)
Ville Syrjälä369a1342014-01-22 14:36:08 +02003586 latency *= 5;
3587
3588 seq_printf(m, "WM%d %u (%u.%u usec)\n",
Damien Lespiau97e94b22014-11-04 17:06:50 +00003589 level, wm[level], latency / 10, latency % 10);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003590 }
3591
3592 drm_modeset_unlock_all(dev);
3593}
3594
3595static int pri_wm_latency_show(struct seq_file *m, void *data)
3596{
David Weinehall36cdd012016-08-22 13:59:31 +03003597 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003598 const u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003599
David Weinehall36cdd012016-08-22 13:59:31 +03003600 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003601 latencies = dev_priv->wm.skl_latency;
3602 else
David Weinehall36cdd012016-08-22 13:59:31 +03003603 latencies = dev_priv->wm.pri_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003604
3605 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003606
3607 return 0;
3608}
3609
3610static int spr_wm_latency_show(struct seq_file *m, void *data)
3611{
David Weinehall36cdd012016-08-22 13:59:31 +03003612 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003613 const u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003614
David Weinehall36cdd012016-08-22 13:59:31 +03003615 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003616 latencies = dev_priv->wm.skl_latency;
3617 else
David Weinehall36cdd012016-08-22 13:59:31 +03003618 latencies = dev_priv->wm.spr_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003619
3620 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003621
3622 return 0;
3623}
3624
3625static int cur_wm_latency_show(struct seq_file *m, void *data)
3626{
David Weinehall36cdd012016-08-22 13:59:31 +03003627 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003628 const u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003629
David Weinehall36cdd012016-08-22 13:59:31 +03003630 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003631 latencies = dev_priv->wm.skl_latency;
3632 else
David Weinehall36cdd012016-08-22 13:59:31 +03003633 latencies = dev_priv->wm.cur_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003634
3635 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003636
3637 return 0;
3638}
3639
3640static int pri_wm_latency_open(struct inode *inode, struct file *file)
3641{
David Weinehall36cdd012016-08-22 13:59:31 +03003642 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003643
Ville Syrjälä04548cb2017-04-21 21:14:29 +03003644 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003645 return -ENODEV;
3646
David Weinehall36cdd012016-08-22 13:59:31 +03003647 return single_open(file, pri_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003648}
3649
3650static int spr_wm_latency_open(struct inode *inode, struct file *file)
3651{
David Weinehall36cdd012016-08-22 13:59:31 +03003652 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003653
Rodrigo Vivib2ae3182019-02-04 14:25:38 -08003654 if (HAS_GMCH(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003655 return -ENODEV;
3656
David Weinehall36cdd012016-08-22 13:59:31 +03003657 return single_open(file, spr_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003658}
3659
3660static int cur_wm_latency_open(struct inode *inode, struct file *file)
3661{
David Weinehall36cdd012016-08-22 13:59:31 +03003662 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003663
Rodrigo Vivib2ae3182019-02-04 14:25:38 -08003664 if (HAS_GMCH(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003665 return -ENODEV;
3666
David Weinehall36cdd012016-08-22 13:59:31 +03003667 return single_open(file, cur_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003668}
3669
/*
 * Common write handler for the watermark latency debugfs files: parse up
 * to eight space-separated u16 values from userspace and store them into
 * @wm under the modeset locks. The number of parsed values must exactly
 * match the number of watermark levels for this platform.
 */
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, u16 wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	u16 new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	/* Reject writes that cannot fit the NUL-terminated scratch buffer. */
	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
3714
3715
3716static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3717 size_t len, loff_t *offp)
3718{
3719 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003720 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003721 u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003722
David Weinehall36cdd012016-08-22 13:59:31 +03003723 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003724 latencies = dev_priv->wm.skl_latency;
3725 else
David Weinehall36cdd012016-08-22 13:59:31 +03003726 latencies = dev_priv->wm.pri_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003727
3728 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003729}
3730
3731static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3732 size_t len, loff_t *offp)
3733{
3734 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003735 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003736 u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003737
David Weinehall36cdd012016-08-22 13:59:31 +03003738 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003739 latencies = dev_priv->wm.skl_latency;
3740 else
David Weinehall36cdd012016-08-22 13:59:31 +03003741 latencies = dev_priv->wm.spr_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003742
3743 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003744}
3745
3746static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3747 size_t len, loff_t *offp)
3748{
3749 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003750 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003751 u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003752
David Weinehall36cdd012016-08-22 13:59:31 +03003753 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003754 latencies = dev_priv->wm.skl_latency;
3755 else
David Weinehall36cdd012016-08-22 13:59:31 +03003756 latencies = dev_priv->wm.cur_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003757
3758 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003759}
3760
/* Read/write debugfs entry for primary watermark latencies. */
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

/* Read/write debugfs entry for sprite watermark latencies. */
static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

/* Read/write debugfs entry for cursor watermark latencies. */
static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
3787
Kees Cook647416f2013-03-10 14:10:06 -07003788static int
3789i915_wedged_get(void *data, u64 *val)
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003790{
Chris Wilsonc41166f2019-02-20 14:56:37 +00003791 int ret = i915_terminally_wedged(data);
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003792
Chris Wilsonc41166f2019-02-20 14:56:37 +00003793 switch (ret) {
3794 case -EIO:
3795 *val = 1;
3796 return 0;
3797 case 0:
3798 *val = 0;
3799 return 0;
3800 default:
3801 return ret;
3802 }
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003803}
3804
/*
 * debugfs hook to declare the GPU hung: raise an error with the given
 * engine mask so the error/reset machinery runs.
 */
static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/* Flush any previous reset before applying for a new one */
	wait_event(i915->gpu_error.reset_queue,
		   !test_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags));

	i915_handle_error(i915, val, I915_ERROR_CAPTURE,
			  "Manually set wedged engine mask = %llx", val);
	return 0;
}

/* i915_wedged: read reports the wedged state, write forces an error. */
DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003822
/*
 * Bit flags accepted by the i915_drop_caches debugfs interface; each bit
 * selects one category of cached GPU state to drop. The exact semantics
 * of each flag are implemented in i915_drop_caches_set() below.
 */
#define DROP_UNBOUND BIT(0)
#define DROP_BOUND BIT(1)
#define DROP_RETIRE BIT(2)
#define DROP_ACTIVE BIT(3)
#define DROP_FREED BIT(4)
#define DROP_SHRINK_ALL BIT(5)
#define DROP_IDLE BIT(6)
#define DROP_RESET_ACTIVE BIT(7)
#define DROP_RESET_SEQNO BIT(8)
/* Union of every flag above; reported by i915_drop_caches_get(). */
#define DROP_ALL (DROP_UNBOUND | \
		  DROP_BOUND | \
		  DROP_RETIRE | \
		  DROP_ACTIVE | \
		  DROP_FREED | \
		  DROP_SHRINK_ALL |\
		  DROP_IDLE | \
		  DROP_RESET_ACTIVE | \
		  DROP_RESET_SEQNO)
/* i915_drop_caches read: advertise the full mask of supported DROP_* flags. */
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}
3848
/*
 * i915_drop_caches write: drop the classes of cached GEM state selected by
 * the DROP_* bits in @val. Ordering matters: a requested reset-if-stuck
 * comes first, then idling/retiring under struct_mutex, then shrinking
 * under the fs_reclaim annotation, then flushing the idle workers and
 * draining freed objects.
 */
static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);

	/*
	 * DROP_RESET_ACTIVE: if the engines fail to idle within the
	 * timeout, give up and wedge the device.
	 */
	if (val & DROP_RESET_ACTIVE &&
	    wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT))
		i915_gem_set_wedged(i915);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	if (val & (DROP_ACTIVE | DROP_IDLE | DROP_RETIRE | DROP_RESET_SEQNO)) {
		int ret;

		ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
		if (ret)
			return ret;

		/*
		 * To finish the flush of the idle_worker, we must complete
		 * the switch-to-kernel-context, which requires a double
		 * pass through wait_for_idle: first queues the switch,
		 * second waits for the switch.
		 */
		if (ret == 0 && val & (DROP_IDLE | DROP_ACTIVE))
			ret = i915_gem_wait_for_idle(i915,
						     I915_WAIT_INTERRUPTIBLE |
						     I915_WAIT_LOCKED,
						     MAX_SCHEDULE_TIMEOUT);

		if (ret == 0 && val & DROP_IDLE)
			ret = i915_gem_wait_for_idle(i915,
						     I915_WAIT_INTERRUPTIBLE |
						     I915_WAIT_LOCKED,
						     MAX_SCHEDULE_TIMEOUT);

		if (val & DROP_RETIRE)
			i915_retire_requests(i915);

		mutex_unlock(&i915->drm.struct_mutex);
	}

	/* If the forced idle above left us wedged, trigger a full reset. */
	if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(i915))
		i915_handle_error(i915, ALL_ENGINES, 0, NULL);

	/* Pretend to be under memory pressure while shrinking. */
	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(i915);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_IDLE) {
		flush_delayed_work(&i915->gem.retire_work);
		flush_work(&i915->gem.idle_work);
	}

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(i915);

	return 0;
}
3918
/* Hex-formatted debugfs attribute wrapping the drop-caches get/set pair. */
DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
Chris Wilsondd624af2013-01-15 12:39:35 +00003922
Kees Cook647416f2013-03-10 14:10:06 -07003923static int
Kees Cook647416f2013-03-10 14:10:06 -07003924i915_cache_sharing_get(void *data, u64 *val)
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07003925{
David Weinehall36cdd012016-08-22 13:59:31 +03003926 struct drm_i915_private *dev_priv = data;
Chris Wilsona0371212019-01-14 14:21:14 +00003927 intel_wakeref_t wakeref;
Chris Wilsond4225a52019-01-14 14:21:23 +00003928 u32 snpcr = 0;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07003929
Lucas De Marchif3ce44a2018-12-12 10:10:44 -08003930 if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
Daniel Vetter004777c2012-08-09 15:07:01 +02003931 return -ENODEV;
3932
Chris Wilsond4225a52019-01-14 14:21:23 +00003933 with_intel_runtime_pm(dev_priv, wakeref)
3934 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07003935
Kees Cook647416f2013-03-10 14:10:06 -07003936 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07003937
Kees Cook647416f2013-03-10 14:10:06 -07003938 return 0;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07003939}
3940
Kees Cook647416f2013-03-10 14:10:06 -07003941static int
3942i915_cache_sharing_set(void *data, u64 val)
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07003943{
David Weinehall36cdd012016-08-22 13:59:31 +03003944 struct drm_i915_private *dev_priv = data;
Chris Wilsona0371212019-01-14 14:21:14 +00003945 intel_wakeref_t wakeref;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07003946
Lucas De Marchif3ce44a2018-12-12 10:10:44 -08003947 if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
Daniel Vetter004777c2012-08-09 15:07:01 +02003948 return -ENODEV;
3949
Kees Cook647416f2013-03-10 14:10:06 -07003950 if (val > 3)
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07003951 return -EINVAL;
3952
Kees Cook647416f2013-03-10 14:10:06 -07003953 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
Chris Wilsond4225a52019-01-14 14:21:23 +00003954 with_intel_runtime_pm(dev_priv, wakeref) {
3955 u32 snpcr;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07003956
Chris Wilsond4225a52019-01-14 14:21:23 +00003957 /* Update the cache sharing policy here as well */
3958 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
3959 snpcr &= ~GEN6_MBC_SNPCR_MASK;
3960 snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
3961 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
3962 }
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07003963
Kees Cook647416f2013-03-10 14:10:06 -07003964 return 0;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07003965}
3966
/* Simple u64 debugfs attribute wrapping the cache-sharing get/set pair. */
DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07003970
/*
 * Fill in @sseu with the runtime slice/subslice/EU power state on
 * Cherryview, decoded from the CHV_POWER_SS*_SIG1/SIG2 registers.
 * CHV has a single slice with up to two subslices; each *_PG_ENABLE bit
 * set means that unit is power-gated (disabled).
 */
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
#define SS_MAX 2
	const int ss_max = SS_MAX;
	u32 sig1[SS_MAX], sig2[SS_MAX];
	int ss;

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] |= BIT(ss);
		/* Each EU signal bit covers a pair of EUs. */
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
#undef SS_MAX
}
4003
/*
 * Fill in @sseu with the runtime slice/subslice/EU power state on gen10+,
 * decoded from the per-slice PGCTL ACK registers. Each slice contributes
 * one s_reg[] entry and two eu_reg[] entries (SS01 and SS23 EU acks).
 */
static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		/*
		 * FIXME: Valid SS Mask respects the spec and read
		 * only valid bits for those registers, excluding reserved
		 * although this seems wrong because it would leave many
		 * subslices without ACK.
		 */
		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
	}

	/* ACK bit groups for the even (SSA) and odd (SSB) subslices. */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		sseu->subslice_mask[s] = info->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			/* Each ACK bit represents a pair of EUs. */
			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4059
/*
 * Fill in @sseu with the runtime slice/subslice/EU power state on gen9,
 * decoded from the per-slice PGCTL ACK registers. On gen9 LP (Broxton
 * family) the subslice mask is derived from the per-subslice ACK bits;
 * on gen9 BC it is taken from the static runtime info.
 */
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	/* ACK bit groups for the even (SSA) and odd (SSB) subslices. */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		if (IS_GEN9_BC(dev_priv))
			sseu->subslice_mask[s] =
				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask[s] |= BIT(ss);
			}

			/* Each ACK bit represents a pair of EUs. */
			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4115
/*
 * Fill in @sseu with the runtime slice/subslice/EU state on Broadwell.
 * Only the slice mask is read from hardware (GEN8_GT_SLICE_INFO); the
 * subslice and EU counts come from the static runtime info, with the
 * 7-EU fuse masks subtracted from the total.
 */
static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
					 struct sseu_dev_info *sseu)
{
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->eu_per_subslice =
			RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			sseu->subslice_mask[s] =
				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
		}
		sseu->eu_total = sseu->eu_per_subslice *
				 intel_sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu =
				RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}
4143
/*
 * Print one sseu_dev_info block to the seq_file. @is_available_info
 * selects the "Available" (static capabilities) vs "Enabled" (runtime
 * status) label; the power-gating capability lines are only printed for
 * the available-info variant.
 */
static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
				 const struct sseu_dev_info *sseu)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const char *type = is_available_info ? "Available" : "Enabled";
	int s;

	seq_printf(m, " %s Slice Mask: %04x\n", type,
		   sseu->slice_mask);
	seq_printf(m, " %s Slice Total: %u\n", type,
		   hweight8(sseu->slice_mask));
	seq_printf(m, " %s Subslice Total: %u\n", type,
		   intel_sseu_subslice_total(sseu));
	for (s = 0; s < fls(sseu->slice_mask); s++) {
		seq_printf(m, " %s Slice%i subslices: %u\n", type,
			   s, intel_sseu_subslices_per_slice(sseu, s));
	}
	seq_printf(m, " %s EU Total: %u\n", type,
		   sseu->eu_total);
	seq_printf(m, " %s EU Per Subslice: %u\n", type,
		   sseu->eu_per_subslice);

	if (!is_available_info)
		return;

	seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
	if (HAS_POOLED_EU(dev_priv))
		seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);

	seq_printf(m, " Has Slice Power Gating: %s\n",
		   yesno(sseu->has_slice_pg));
	seq_printf(m, " Has Subslice Power Gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	seq_printf(m, " Has EU Power Gating: %s\n",
		   yesno(sseu->has_eu_pg));
}
4180
/*
 * i915_sseu_status debugfs show: print the static SSEU capabilities
 * followed by the current runtime status, read from hardware via the
 * platform-specific *_sseu_device_status() helper. Gen8+ only.
 */
static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct sseu_dev_info sseu;
	intel_wakeref_t wakeref;

	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));
	/* Seed the maxima from the static info before probing hardware. */
	sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
	sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
	sseu.max_eus_per_subslice =
		RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;

	with_intel_runtime_pm(dev_priv, wakeref) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_sseu_device_status(dev_priv, &sseu);
		else if (IS_BROADWELL(dev_priv))
			broadwell_sseu_device_status(dev_priv, &sseu);
		else if (IS_GEN(dev_priv, 9))
			gen9_sseu_device_status(dev_priv, &sseu);
		else if (INTEL_GEN(dev_priv) >= 10)
			gen10_sseu_device_status(dev_priv, &sseu);
	}

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}
4215
/*
 * i915_forcewake_user open: grab a runtime-pm wakeref and a user
 * forcewake reference for as long as the file is held open. The wakeref
 * is stashed in file->private_data so release can drop exactly it.
 * No-op before gen6 (no forcewake hardware).
 */
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	if (INTEL_GEN(i915) < 6)
		return 0;

	file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915);
	intel_uncore_forcewake_user_get(&i915->uncore);

	return 0;
}
4228
/*
 * i915_forcewake_user release: drop the forcewake reference and the
 * runtime-pm wakeref that i915_forcewake_open() stored in
 * file->private_data. Mirrors the open path, including the gen6 gate.
 */
static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	if (INTEL_GEN(i915) < 6)
		return 0;

	intel_uncore_forcewake_user_put(&i915->uncore);
	intel_runtime_pm_put(i915,
			     (intel_wakeref_t)(uintptr_t)file->private_data);

	return 0;
}
4242
/* Hold-open debugfs file: forcewake is held while the file stays open. */
static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
4248
/*
 * i915_hpd_storm_ctl show: report the current HPD storm threshold and
 * whether a storm has been detected (the reenable work is pending).
 */
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;

	/* Synchronize with everything first in case there's been an HPD
	 * storm, but we haven't finished handling it in the kernel yet
	 */
	synchronize_irq(dev_priv->drm.irq);
	flush_work(&dev_priv->hotplug.dig_port_work);
	flush_work(&dev_priv->hotplug.hotplug_work);

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   yesno(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}
4267
/*
 * i915_hpd_storm_ctl write: set the HPD storm detection threshold.
 * Accepts a decimal count (0 disables detection) or the literal string
 * "reset" to restore HPD_STORM_DEFAULT_THRESHOLD. Also clears the
 * per-pin storm statistics and re-enables HPD if a storm was active.
 */
static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	/* Leave room for the terminating NUL. */
	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
			      new_threshold);
	else
		DRM_DEBUG_KMS("Disabling HPD storm detection\n");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}
4316
/* seq_file open wrapper passing the i915 device as show() private data. */
static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}
4321
/* Read/write debugfs file controlling HPD storm detection threshold. */
static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
4330
/*
 * i915_hpd_short_storm_ctl show: report whether short-pulse HPD storm
 * detection is currently enabled.
 */
static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;

	seq_printf(m, "Enabled: %s\n",
		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));

	return 0;
}
4340
/* seq_file open wrapper passing the i915 device as show() private data. */
static int
i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_short_storm_ctl_show,
			   inode->i_private);
}
4347
/*
 * i915_hpd_short_storm_ctl write: enable/disable short-pulse HPD storm
 * detection. Accepts a boolean (kstrtobool syntax) or the literal string
 * "reset" to restore the platform default (enabled unless DP MST is
 * supported). Also clears per-pin storm statistics and re-enables HPD.
 */
static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
					      const char __user *ubuf,
					      size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	char *newline;
	char tmp[16];
	int i;
	bool new_state;

	/* Leave room for the terminating NUL. */
	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	/* Reset to the "default" state for this system */
	if (strcmp(tmp, "reset") == 0)
		new_state = !HAS_DP_MST(dev_priv);
	else if (kstrtobool(tmp, &new_state) != 0)
		return -EINVAL;

	DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
		      new_state ? "En" : "Dis");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_short_storm_enabled = new_state;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}
4394
/* Read/write debugfs file controlling short-pulse HPD storm detection. */
static const struct file_operations i915_hpd_short_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_short_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_short_storm_ctl_write,
};
4403
/*
 * i915_drrs_ctl write: manually enable (@val != 0) or disable DRRS on
 * every active eDP output. For each CRTC we take its modeset lock, wait
 * for any pending commit to complete its hardware phase, then walk the
 * connectors attached to that CRTC and toggle DRRS on the eDP ones.
 * Gen7+ only.
 */
static int i915_drrs_ctl_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 7)
		return -ENODEV;

	for_each_intel_crtc(dev, crtc) {
		struct drm_connector_list_iter conn_iter;
		struct intel_crtc_state *crtc_state;
		struct drm_connector *connector;
		struct drm_crtc_commit *commit;
		int ret;

		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		/* Nothing to do on inactive or non-DRRS CRTCs. */
		if (!crtc_state->base.active ||
		    !crtc_state->has_drrs)
			goto out;

		/* Wait for any in-flight commit to reach the hardware. */
		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				goto out;
		}

		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct intel_encoder *encoder;
			struct intel_dp *intel_dp;

			/* Only connectors driven by this CRTC. */
			if (!(crtc_state->base.connector_mask &
			      drm_connector_mask(connector)))
				continue;

			encoder = intel_attached_encoder(connector);
			if (encoder->type != INTEL_OUTPUT_EDP)
				continue;

			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
						val ? "en" : "dis", val);

			intel_dp = enc_to_intel_dp(&encoder->base);
			if (val)
				intel_edp_drrs_enable(intel_dp,
						      crtc_state);
			else
				intel_edp_drrs_disable(intel_dp,
						       crtc_state);
		}
		drm_connector_list_iter_end(&conn_iter);

out:
		drm_modeset_unlock(&crtc->base.mutex);
		if (ret)
			return ret;
	}

	return 0;
}
4471
4472DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4473
/*
 * i915_fifo_underrun_reset write: on a truthy input, re-arm FIFO underrun
 * reporting on every active CRTC (it is disarmed after the first underrun
 * to avoid an interrupt flood) and reset the FBC underrun state. For each
 * CRTC the modeset lock is taken and any pending commit is waited on
 * (hw_done then flip_done) before re-arming.
 */
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	/* A false value is accepted but does nothing. */
	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->base.active) {
			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
				      pipe_name(intel_crtc->pipe));

			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}

		drm_modeset_unlock(&intel_crtc->base.mutex);

		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}
4527
/* Write-only debugfs file to re-arm FIFO underrun reporting. */
static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};
4534
/*
 * Read-only debugfs info nodes, registered via drm_debugfs_create_files()
 * in i915_debugfs_register(). Each entry is {name, show callback, driver
 * flags mask[, callback data]}; the optional fourth field is passed to the
 * callback (e.g. the GuC log dump reuses one callback with (void *)1 to
 * select the load-error log).
 */
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_reset_info", i915_reset_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
Ben Gamari20172632009-02-17 20:08:50 -05004583
/*
 * Writable debugfs control files, created individually with
 * debugfs_create_file() in i915_debugfs_register(). Each entry pairs a
 * file name with its file_operations; the error-capture entries are only
 * built when CONFIG_DRM_I915_CAPTURE_ERROR is enabled.
 */
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};
4611
Chris Wilson1dac8912016-06-24 14:00:17 +01004612int i915_debugfs_register(struct drm_i915_private *dev_priv)
Ben Gamari20172632009-02-17 20:08:50 -05004613{
Chris Wilson91c8a322016-07-05 10:40:23 +01004614 struct drm_minor *minor = dev_priv->drm.primary;
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004615 struct dentry *ent;
Maarten Lankhorst6cc42152018-06-28 09:23:02 +02004616 int i;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004617
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004618 ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4619 minor->debugfs_root, to_i915(minor->dev),
4620 &i915_forcewake_fops);
4621 if (!ent)
4622 return -ENOMEM;
Daniel Vetter6a9c3082011-12-14 13:57:11 +01004623
Daniel Vetter34b96742013-07-04 20:49:44 +02004624 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004625 ent = debugfs_create_file(i915_debugfs_files[i].name,
4626 S_IRUGO | S_IWUSR,
4627 minor->debugfs_root,
4628 to_i915(minor->dev),
Daniel Vetter34b96742013-07-04 20:49:44 +02004629 i915_debugfs_files[i].fops);
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004630 if (!ent)
4631 return -ENOMEM;
Daniel Vetter34b96742013-07-04 20:49:44 +02004632 }
Mika Kuoppala40633212012-12-04 15:12:00 +02004633
Ben Gamari27c202a2009-07-01 22:26:52 -04004634 return drm_debugfs_create_files(i915_debugfs_list,
4635 I915_DEBUGFS_ENTRIES,
Ben Gamari20172632009-02-17 20:08:50 -05004636 minor->debugfs_root, minor);
4637}
4638
/*
 * Describes one DPCD register range to dump in the per-connector
 * "i915_dpcd" debugfs file. A range is either the inclusive
 * [offset, end] span or [offset, offset + size); see i915_dpcd_show()
 * for how the fields are combined.
 */
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};
4649
/*
 * DPCD register ranges dumped by i915_dpcd_show(). Entries marked
 * .edp = true would be skipped on non-eDP connectors (none currently are).
 */
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
4662
4663static int i915_dpcd_show(struct seq_file *m, void *data)
4664{
4665 struct drm_connector *connector = m->private;
4666 struct intel_dp *intel_dp =
4667 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
Jani Nikulae5315212019-01-16 11:15:23 +02004668 u8 buf[16];
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004669 ssize_t err;
4670 int i;
4671
Mika Kuoppala5c1a8872015-05-15 13:09:21 +03004672 if (connector->status != connector_status_connected)
4673 return -ENODEV;
4674
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004675 for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4676 const struct dpcd_block *b = &i915_dpcd_debug[i];
4677 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4678
4679 if (b->edp &&
4680 connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4681 continue;
4682
4683 /* low tech for now */
4684 if (WARN_ON(size > sizeof(buf)))
4685 continue;
4686
4687 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
Chris Wilson65404c82018-10-10 09:17:06 +01004688 if (err < 0)
4689 seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
4690 else
4691 seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
kbuild test robotb3f9d7d2015-04-16 18:34:06 +08004692 }
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004693
4694 return 0;
4695}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02004696DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004697
David Weinehallecbd6782016-08-23 12:23:56 +03004698static int i915_panel_show(struct seq_file *m, void *data)
4699{
4700 struct drm_connector *connector = m->private;
4701 struct intel_dp *intel_dp =
4702 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4703
4704 if (connector->status != connector_status_connected)
4705 return -ENODEV;
4706
4707 seq_printf(m, "Panel power up delay: %d\n",
4708 intel_dp->panel_power_up_delay);
4709 seq_printf(m, "Panel power down delay: %d\n",
4710 intel_dp->panel_power_down_delay);
4711 seq_printf(m, "Backlight on delay: %d\n",
4712 intel_dp->backlight_on_delay);
4713 seq_printf(m, "Backlight off delay: %d\n",
4714 intel_dp->backlight_off_delay);
4715
4716 return 0;
4717}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02004718DEFINE_SHOW_ATTRIBUTE(i915_panel);
David Weinehallecbd6782016-08-23 12:23:56 +03004719
Ramalingam Cbdc93fe2018-10-23 14:52:29 +05304720static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
4721{
4722 struct drm_connector *connector = m->private;
4723 struct intel_connector *intel_connector = to_intel_connector(connector);
Ramalingam C43318c02019-05-07 21:57:36 +05304724 bool hdcp_cap, hdcp2_cap;
Ramalingam Cbdc93fe2018-10-23 14:52:29 +05304725
4726 if (connector->status != connector_status_connected)
4727 return -ENODEV;
4728
4729 /* HDCP is supported by connector */
Ramalingam Cd3dacc72018-10-29 15:15:46 +05304730 if (!intel_connector->hdcp.shim)
Ramalingam Cbdc93fe2018-10-23 14:52:29 +05304731 return -EINVAL;
4732
4733 seq_printf(m, "%s:%d HDCP version: ", connector->name,
4734 connector->base.id);
Ramalingam C43318c02019-05-07 21:57:36 +05304735 hdcp_cap = intel_hdcp_capable(intel_connector);
4736 hdcp2_cap = intel_hdcp2_capable(intel_connector);
4737
4738 if (hdcp_cap)
4739 seq_puts(m, "HDCP1.4 ");
4740 if (hdcp2_cap)
4741 seq_puts(m, "HDCP2.2 ");
4742
4743 if (!hdcp_cap && !hdcp2_cap)
4744 seq_puts(m, "None");
Ramalingam Cbdc93fe2018-10-23 14:52:29 +05304745 seq_puts(m, "\n");
4746
4747 return 0;
4748}
4749DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
4750
/*
 * i915_dsc_fec_support_show - report DSC enable state and DSC/FEC sink
 * capability for the CRTC currently driving this DP/eDP connector.
 *
 * Takes the connection_mutex and the CRTC mutex via a drm_modeset_acquire
 * context, retrying with drm_modeset_backoff() on -EDEADLK. The exact
 * lock/backoff ordering below is deliberate; do not reorder.
 *
 * Returns 0 on success, -ENODEV if the connector is disconnected or has
 * no CRTC, or a locking error code.
 */
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret = 0;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		try_again = false;
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			/* Deadlock: drop all locks and restart the sequence. */
			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
				try_again = true;
				continue;
			}
			break;
		}
		crtc = connector->state->crtc;
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			/* Back off and retry; bail out if backoff was interrupted. */
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   yesno(crtc_state->dsc_params.compression_enable));
		seq_printf(m, "DSC_Sink_Support: %s\n",
			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		seq_printf(m, "Force_DSC_Enable: %s\n",
			   yesno(intel_dp->force_dsc_en));
		/* FEC is a DP-link property, not meaningful for eDP. */
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
4809
4810static ssize_t i915_dsc_fec_support_write(struct file *file,
4811 const char __user *ubuf,
4812 size_t len, loff_t *offp)
4813{
4814 bool dsc_enable = false;
4815 int ret;
4816 struct drm_connector *connector =
4817 ((struct seq_file *)file->private_data)->private;
4818 struct intel_encoder *encoder = intel_attached_encoder(connector);
4819 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4820
4821 if (len == 0)
4822 return 0;
4823
4824 DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
4825 len);
4826
4827 ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
4828 if (ret < 0)
4829 return ret;
4830
4831 DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
4832 (dsc_enable) ? "true" : "false");
4833 intel_dp->force_dsc_en = dsc_enable;
4834
4835 *offp += len;
4836 return len;
4837}
4838
/* single_open() wrapper passing the connector (inode->i_private) to the show callback. */
static int i915_dsc_fec_support_open(struct inode *inode,
				     struct file *file)
{
	return single_open(file, i915_dsc_fec_support_show,
			   inode->i_private);
}

/*
 * File operations for the per-connector "i915_dsc_fec_support" debugfs
 * entry: seq_file read of the DSC/FEC state plus a write handler to force
 * DSC on/off.
 */
static const struct file_operations i915_dsc_fec_support_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_fec_support_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_fec_support_write
};
4854
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004855/**
4856 * i915_debugfs_connector_add - add i915 specific connector debugfs files
4857 * @connector: pointer to a registered drm_connector
4858 *
4859 * Cleanup will be done by drm_connector_unregister() through a call to
4860 * drm_debugfs_connector_remove().
4861 *
4862 * Returns 0 on success, negative error codes on error.
4863 */
4864int i915_debugfs_connector_add(struct drm_connector *connector)
4865{
4866 struct dentry *root = connector->debugfs_entry;
Manasi Navaree845f092018-12-05 16:54:07 -08004867 struct drm_i915_private *dev_priv = to_i915(connector->dev);
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004868
4869 /* The connector must have been registered beforehands. */
4870 if (!root)
4871 return -ENODEV;
4872
4873 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4874 connector->connector_type == DRM_MODE_CONNECTOR_eDP)
David Weinehallecbd6782016-08-23 12:23:56 +03004875 debugfs_create_file("i915_dpcd", S_IRUGO, root,
4876 connector, &i915_dpcd_fops);
4877
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07004878 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
David Weinehallecbd6782016-08-23 12:23:56 +03004879 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4880 connector, &i915_panel_fops);
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07004881 debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
4882 connector, &i915_psr_sink_status_fops);
4883 }
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004884
Ramalingam Cbdc93fe2018-10-23 14:52:29 +05304885 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4886 connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
4887 connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
4888 debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
4889 connector, &i915_hdcp_sink_capability_fops);
4890 }
4891
Manasi Navaree845f092018-12-05 16:54:07 -08004892 if (INTEL_GEN(dev_priv) >= 10 &&
4893 (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4894 connector->connector_type == DRM_MODE_CONNECTOR_eDP))
4895 debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
4896 connector, &i915_dsc_fec_support_fops);
4897
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004898 return 0;
4899}