blob: 6972f9b6ae83035e99c9748fdc27dc086986aa47 [file] [log] [blame]
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */
28
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +010029#include <linux/sched/mm.h>
Jani Nikula98afa312019-04-05 14:00:08 +030030#include <linux/sort.h>
31
Daniel Vetterfcd70cd2019-01-17 22:03:34 +010032#include <drm/drm_debugfs.h>
33#include <drm/drm_fourcc.h>
Ben Gamari20172632009-02-17 20:08:50 -050034
Chris Wilson112ed2d2019-04-24 18:48:39 +010035#include "gt/intel_reset.h"
36
Jani Nikula27fec1f2019-04-05 14:00:17 +030037#include "intel_dp.h"
Jani Nikula98afa312019-04-05 14:00:08 +030038#include "intel_drv.h"
39#include "intel_fbc.h"
40#include "intel_guc_submission.h"
Jani Nikula408bd912019-04-05 14:00:13 +030041#include "intel_hdcp.h"
Jani Nikula05506912019-04-05 14:00:18 +030042#include "intel_hdmi.h"
Jani Nikula696173b2019-04-05 14:00:15 +030043#include "intel_pm.h"
Jani Nikula55367a22019-04-05 14:00:09 +030044#include "intel_psr.h"
Chris Wilson9f588922019-01-16 15:33:04 +000045
David Weinehall36cdd012016-08-22 13:59:31 +030046static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
47{
48 return to_i915(node->minor->dev);
49}
50
Chris Wilson70d39fe2010-08-25 16:03:34 +010051static int i915_capabilities(struct seq_file *m, void *data)
52{
David Weinehall36cdd012016-08-22 13:59:31 +030053 struct drm_i915_private *dev_priv = node_to_i915(m->private);
54 const struct intel_device_info *info = INTEL_INFO(dev_priv);
Michal Wajdeczkoa8c9b842017-12-19 11:43:44 +000055 struct drm_printer p = drm_seq_file_printer(m);
Chris Wilson70d39fe2010-08-25 16:03:34 +010056
David Weinehall36cdd012016-08-22 13:59:31 +030057 seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
Jani Nikula2e0d26f2016-12-01 14:49:55 +020058 seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
David Weinehall36cdd012016-08-22 13:59:31 +030059 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
Chris Wilson418e3cd2017-02-06 21:36:08 +000060
Michal Wajdeczkoa8c9b842017-12-19 11:43:44 +000061 intel_device_info_dump_flags(info, &p);
Jani Nikula02584042018-12-31 16:56:41 +020062 intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
Chris Wilson3fed1802018-02-07 21:05:43 +000063 intel_driver_caps_print(&dev_priv->caps, &p);
Chris Wilson70d39fe2010-08-25 16:03:34 +010064
Chris Wilson418e3cd2017-02-06 21:36:08 +000065 kernel_param_lock(THIS_MODULE);
Michal Wajdeczkoacfb9972017-12-19 11:43:46 +000066 i915_params_dump(&i915_modparams, &p);
Chris Wilson418e3cd2017-02-06 21:36:08 +000067 kernel_param_unlock(THIS_MODULE);
68
Chris Wilson70d39fe2010-08-25 16:03:34 +010069 return 0;
70}
Ben Gamari433e12f2009-02-17 20:08:51 -050071
/* '*' when the object is active on the GPU, ' ' otherwise. */
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_is_active(obj))
		return '*';

	return ' ';
}
76
Imre Deaka7363de2016-05-12 16:18:52 +030077static char get_pin_flag(struct drm_i915_gem_object *obj)
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010078{
Chris Wilsonbd3d2252017-10-13 21:26:14 +010079 return obj->pin_global ? 'p' : ' ';
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010080}
81
Imre Deaka7363de2016-05-12 16:18:52 +030082static char get_tiling_flag(struct drm_i915_gem_object *obj)
Chris Wilsona6172a82009-02-11 14:26:38 +000083{
Chris Wilson3e510a82016-08-05 10:14:23 +010084 switch (i915_gem_object_get_tiling(obj)) {
Akshay Joshi0206e352011-08-16 15:34:10 -040085 default:
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010086 case I915_TILING_NONE: return ' ';
87 case I915_TILING_X: return 'X';
88 case I915_TILING_Y: return 'Y';
Akshay Joshi0206e352011-08-16 15:34:10 -040089 }
Chris Wilsona6172a82009-02-11 14:26:38 +000090}
91
Imre Deaka7363de2016-05-12 16:18:52 +030092static char get_global_flag(struct drm_i915_gem_object *obj)
Ben Widawsky1d693bc2013-07-31 17:00:00 -070093{
Chris Wilsona65adaf2017-10-09 09:43:57 +010094 return obj->userfault_count ? 'g' : ' ';
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010095}
96
Imre Deaka7363de2016-05-12 16:18:52 +030097static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010098{
Chris Wilsona4f5ea62016-10-28 13:58:35 +010099 return obj->mm.mapping ? 'M' : ' ';
Ben Widawsky1d693bc2013-07-31 17:00:00 -0700100}
101
Tvrtko Ursulinca1543b2015-07-01 11:51:10 +0100102static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
103{
104 u64 size = 0;
105 struct i915_vma *vma;
106
Chris Wilsone2189dd2017-12-07 21:14:07 +0000107 for_each_ggtt_vma(vma, obj) {
108 if (drm_mm_node_allocated(&vma->node))
Tvrtko Ursulinca1543b2015-07-01 11:51:10 +0100109 size += vma->node.size;
110 }
111
112 return size;
113}
114
Matthew Auld7393b7e2017-10-06 23:18:28 +0100115static const char *
116stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
117{
118 size_t x = 0;
119
120 switch (page_sizes) {
121 case 0:
122 return "";
123 case I915_GTT_PAGE_SIZE_4K:
124 return "4K";
125 case I915_GTT_PAGE_SIZE_64K:
126 return "64K";
127 case I915_GTT_PAGE_SIZE_2M:
128 return "2M";
129 default:
130 if (!buf)
131 return "M";
132
133 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
134 x += snprintf(buf + x, len - x, "2M, ");
135 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
136 x += snprintf(buf + x, len - x, "64K, ");
137 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
138 x += snprintf(buf + x, len - x, "4K, ");
139 buf[x-2] = '\0';
140
141 return buf;
142 }
143}
144
/*
 * describe_obj - print a one-line summary of a GEM object to a seq_file.
 *
 * Emits the object's status flag characters, size, read/write domains,
 * cache level and madv state, then per-VMA binding details (offset, size,
 * page sizes, GGTT view type, fence), any stolen-memory placement, the
 * last engine that wrote the object, and its frontbuffer bits.
 *
 * Caller must hold the device struct_mutex (asserted below).
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	/* Count pinned VMAs across all address spaces. */
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_global)
		seq_printf(m, " (global)");
	/* One "(...)" group per VMA that is actually bound. */
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				/* Partial views store offset/size in pages. */
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_active_request_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
232
Chris Wilsone637d2c2017-03-16 13:19:57 +0000233static int obj_rank_by_stolen(const void *A, const void *B)
Chris Wilson6d2b88852013-08-07 18:30:54 +0100234{
Chris Wilsone637d2c2017-03-16 13:19:57 +0000235 const struct drm_i915_gem_object *a =
236 *(const struct drm_i915_gem_object **)A;
237 const struct drm_i915_gem_object *b =
238 *(const struct drm_i915_gem_object **)B;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100239
Rasmus Villemoes2d05fa12015-09-28 23:08:50 +0200240 if (a->stolen->start < b->stolen->start)
241 return -1;
242 if (a->stolen->start > b->stolen->start)
243 return 1;
244 return 0;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100245}
246
247static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
248{
David Weinehall36cdd012016-08-22 13:59:31 +0300249 struct drm_i915_private *dev_priv = node_to_i915(m->private);
250 struct drm_device *dev = &dev_priv->drm;
Chris Wilsone637d2c2017-03-16 13:19:57 +0000251 struct drm_i915_gem_object **objects;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100252 struct drm_i915_gem_object *obj;
Mika Kuoppalac44ef602015-06-25 18:35:05 +0300253 u64 total_obj_size, total_gtt_size;
Chris Wilsone637d2c2017-03-16 13:19:57 +0000254 unsigned long total, count, n;
255 int ret;
256
257 total = READ_ONCE(dev_priv->mm.object_count);
Michal Hocko20981052017-05-17 14:23:12 +0200258 objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
Chris Wilsone637d2c2017-03-16 13:19:57 +0000259 if (!objects)
260 return -ENOMEM;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100261
262 ret = mutex_lock_interruptible(&dev->struct_mutex);
263 if (ret)
Chris Wilsone637d2c2017-03-16 13:19:57 +0000264 goto out;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100265
266 total_obj_size = total_gtt_size = count = 0;
Chris Wilsonf2123812017-10-16 12:40:37 +0100267
268 spin_lock(&dev_priv->mm.obj_lock);
269 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
Chris Wilsone637d2c2017-03-16 13:19:57 +0000270 if (count == total)
271 break;
272
Chris Wilson6d2b88852013-08-07 18:30:54 +0100273 if (obj->stolen == NULL)
274 continue;
275
Chris Wilsone637d2c2017-03-16 13:19:57 +0000276 objects[count++] = obj;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100277 total_obj_size += obj->base.size;
Tvrtko Ursulinca1543b2015-07-01 11:51:10 +0100278 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
Chris Wilsone637d2c2017-03-16 13:19:57 +0000279
Chris Wilson6d2b88852013-08-07 18:30:54 +0100280 }
Chris Wilsonf2123812017-10-16 12:40:37 +0100281 list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
Chris Wilsone637d2c2017-03-16 13:19:57 +0000282 if (count == total)
283 break;
284
Chris Wilson6d2b88852013-08-07 18:30:54 +0100285 if (obj->stolen == NULL)
286 continue;
287
Chris Wilsone637d2c2017-03-16 13:19:57 +0000288 objects[count++] = obj;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100289 total_obj_size += obj->base.size;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100290 }
Chris Wilsonf2123812017-10-16 12:40:37 +0100291 spin_unlock(&dev_priv->mm.obj_lock);
Chris Wilson6d2b88852013-08-07 18:30:54 +0100292
Chris Wilsone637d2c2017-03-16 13:19:57 +0000293 sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);
294
295 seq_puts(m, "Stolen:\n");
296 for (n = 0; n < count; n++) {
297 seq_puts(m, " ");
298 describe_obj(m, objects[n]);
299 seq_putc(m, '\n');
300 }
301 seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
Chris Wilson6d2b88852013-08-07 18:30:54 +0100302 count, total_obj_size, total_gtt_size);
Chris Wilsone637d2c2017-03-16 13:19:57 +0000303
304 mutex_unlock(&dev->struct_mutex);
305out:
Michal Hocko20981052017-05-17 14:23:12 +0200306 kvfree(objects);
Chris Wilsone637d2c2017-03-16 13:19:57 +0000307 return ret;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100308}
309
/* Accumulator filled in by per_file_stats() for one client/context. */
struct file_stats {
	/* ppGTT to attribute per-process VMAs to; others are skipped. */
	struct i915_address_space *vm;
	unsigned long count;		/* number of objects visited */
	u64 total, unbound;		/* all bytes / bytes with no binding */
	u64 global, shared;		/* GGTT-bound bytes / named or dma-buf exported bytes */
	u64 active, inactive;		/* bytes in active vs idle VMAs */
	u64 closed;			/* bytes in VMAs already closed */
};
318
319static int per_file_stats(int id, void *ptr, void *data)
320{
321 struct drm_i915_gem_object *obj = ptr;
322 struct file_stats *stats = data;
Chris Wilson6313c202014-03-19 13:45:45 +0000323 struct i915_vma *vma;
Chris Wilson2db8e9d2013-06-04 23:49:08 +0100324
Chris Wilson0caf81b2017-06-17 12:57:44 +0100325 lockdep_assert_held(&obj->base.dev->struct_mutex);
326
Chris Wilson2db8e9d2013-06-04 23:49:08 +0100327 stats->count++;
328 stats->total += obj->base.size;
Chris Wilson15717de2016-08-04 07:52:26 +0100329 if (!obj->bind_count)
330 stats->unbound += obj->base.size;
Chris Wilsonc67a17e2014-03-19 13:45:46 +0000331 if (obj->base.name || obj->base.dma_buf)
332 stats->shared += obj->base.size;
333
Chris Wilson528cbd12019-01-28 10:23:54 +0000334 list_for_each_entry(vma, &obj->vma.list, obj_link) {
Chris Wilson894eeec2016-08-04 07:52:20 +0100335 if (!drm_mm_node_allocated(&vma->node))
336 continue;
Chris Wilson6313c202014-03-19 13:45:45 +0000337
Chris Wilson3272db52016-08-04 16:32:32 +0100338 if (i915_vma_is_ggtt(vma)) {
Chris Wilson894eeec2016-08-04 07:52:20 +0100339 stats->global += vma->node.size;
340 } else {
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000341 if (vma->vm != stats->vm)
Chris Wilson6313c202014-03-19 13:45:45 +0000342 continue;
Chris Wilson6313c202014-03-19 13:45:45 +0000343 }
Chris Wilson894eeec2016-08-04 07:52:20 +0100344
Chris Wilsonb0decaf2016-08-04 07:52:44 +0100345 if (i915_vma_is_active(vma))
Chris Wilson894eeec2016-08-04 07:52:20 +0100346 stats->active += vma->node.size;
347 else
348 stats->inactive += vma->node.size;
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000349
350 if (i915_vma_is_closed(vma))
351 stats->closed += vma->node.size;
Chris Wilson2db8e9d2013-06-04 23:49:08 +0100352 }
353
354 return 0;
355}
356
/*
 * Emit one summary line for a file_stats accumulator, prefixed by @name.
 * Nothing is printed when no objects were counted. Kept as a macro so it
 * works with a by-value struct from any caller.
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound, \
			   stats.closed); \
} while (0)
Brad Volkin493018d2014-12-11 12:13:08 -0800370
371static void print_batch_pool_stats(struct seq_file *m,
372 struct drm_i915_private *dev_priv)
373{
374 struct drm_i915_gem_object *obj;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000375 struct intel_engine_cs *engine;
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000376 struct file_stats stats = {};
Akash Goel3b3f1652016-10-13 22:44:48 +0530377 enum intel_engine_id id;
Dave Gordonb4ac5af2016-03-24 11:20:38 +0000378 int j;
Brad Volkin493018d2014-12-11 12:13:08 -0800379
Akash Goel3b3f1652016-10-13 22:44:48 +0530380 for_each_engine(engine, dev_priv, id) {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000381 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
Chris Wilson8d9d5742015-04-07 16:20:38 +0100382 list_for_each_entry(obj,
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000383 &engine->batch_pool.cache_list[j],
Chris Wilson8d9d5742015-04-07 16:20:38 +0100384 batch_pool_link)
385 per_file_stats(0, obj, &stats);
386 }
Chris Wilson06fbca72015-04-07 16:20:36 +0100387 }
Brad Volkin493018d2014-12-11 12:13:08 -0800388
Chris Wilsonb0da1b72015-04-07 16:20:40 +0100389 print_file_stats(m, "[k]batch pool", stats);
Brad Volkin493018d2014-12-11 12:13:08 -0800390}
391
/*
 * print_context_stats - per-context memory statistics.
 *
 * Kernel-owned context state (HW context image and ringbuffer objects)
 * is accumulated into kstats and printed as "[k]contexts". For each
 * context that still has a file_priv, the owning file's objects are
 * walked via its object idr and printed under the task's comm name.
 *
 * Caller holds struct_mutex (required by per_file_stats).
 */
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *i915)
{
	struct file_stats kstats = {};
	struct i915_gem_context *ctx;

	list_for_each_entry(ctx, &i915->contexts.list, link) {
		struct intel_context *ce;

		/* HW state + ring objects are kernel allocations. */
		list_for_each_entry(ce, &ctx->active_engines, active_link) {
			if (ce->state)
				per_file_stats(0, ce->state->obj, &kstats);
			if (ce->ring)
				per_file_stats(0, ce->ring->vma->obj, &kstats);
		}

		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
			/* Attribute only VMAs in this context's ppGTT. */
			struct file_stats stats = { .vm = &ctx->ppgtt->vm, };
			struct drm_file *file = ctx->file_priv->file;
			struct task_struct *task;
			char name[80];

			spin_lock(&file->table_lock);
			idr_for_each(&file->object_idr, per_file_stats, &stats);
			spin_unlock(&file->table_lock);

			/* RCU protects the task lookup and comm read. */
			rcu_read_lock();
			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
			snprintf(name, sizeof(name), "%s",
				 task ? task->comm : "<unknown>");
			rcu_read_unlock();

			print_file_stats(m, name, stats);
		}
	}

	print_file_stats(m, "[k]contexts", kstats);
}
430
David Weinehall36cdd012016-08-22 13:59:31 +0300431static int i915_gem_object_info(struct seq_file *m, void *data)
Chris Wilson73aa8082010-09-30 11:46:12 +0100432{
David Weinehall36cdd012016-08-22 13:59:31 +0300433 struct drm_i915_private *dev_priv = node_to_i915(m->private);
434 struct drm_device *dev = &dev_priv->drm;
Joonas Lahtinen72e96d62016-03-30 16:57:10 +0300435 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Matthew Auld7393b7e2017-10-06 23:18:28 +0100436 u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
437 u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
Chris Wilson6299f992010-11-24 12:23:44 +0000438 struct drm_i915_gem_object *obj;
Matthew Auld7393b7e2017-10-06 23:18:28 +0100439 unsigned int page_sizes = 0;
Matthew Auld7393b7e2017-10-06 23:18:28 +0100440 char buf[80];
Chris Wilson73aa8082010-09-30 11:46:12 +0100441 int ret;
442
Chris Wilson3ef7f222016-10-18 13:02:48 +0100443 seq_printf(m, "%u objects, %llu bytes\n",
Chris Wilson6299f992010-11-24 12:23:44 +0000444 dev_priv->mm.object_count,
445 dev_priv->mm.object_memory);
446
Chris Wilson1544c422016-08-15 13:18:16 +0100447 size = count = 0;
448 mapped_size = mapped_count = 0;
449 purgeable_size = purgeable_count = 0;
Matthew Auld7393b7e2017-10-06 23:18:28 +0100450 huge_size = huge_count = 0;
Chris Wilsonf2123812017-10-16 12:40:37 +0100451
452 spin_lock(&dev_priv->mm.obj_lock);
453 list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
Chris Wilson2bd160a2016-08-15 10:48:45 +0100454 size += obj->base.size;
455 ++count;
Chris Wilson6c085a72012-08-20 11:40:46 +0200456
Chris Wilsona4f5ea62016-10-28 13:58:35 +0100457 if (obj->mm.madv == I915_MADV_DONTNEED) {
Chris Wilsonb7abb712012-08-20 11:33:30 +0200458 purgeable_size += obj->base.size;
459 ++purgeable_count;
460 }
Chris Wilson2bd160a2016-08-15 10:48:45 +0100461
Chris Wilsona4f5ea62016-10-28 13:58:35 +0100462 if (obj->mm.mapping) {
Chris Wilson2bd160a2016-08-15 10:48:45 +0100463 mapped_count++;
464 mapped_size += obj->base.size;
Tvrtko Ursulinbe19b102016-04-15 11:34:53 +0100465 }
Matthew Auld7393b7e2017-10-06 23:18:28 +0100466
467 if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
468 huge_count++;
469 huge_size += obj->base.size;
470 page_sizes |= obj->mm.page_sizes.sg;
471 }
Chris Wilson6299f992010-11-24 12:23:44 +0000472 }
Chris Wilson2bd160a2016-08-15 10:48:45 +0100473 seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
474
475 size = count = dpy_size = dpy_count = 0;
Chris Wilsonf2123812017-10-16 12:40:37 +0100476 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
Chris Wilson2bd160a2016-08-15 10:48:45 +0100477 size += obj->base.size;
478 ++count;
479
Chris Wilsonbd3d2252017-10-13 21:26:14 +0100480 if (obj->pin_global) {
Chris Wilson2bd160a2016-08-15 10:48:45 +0100481 dpy_size += obj->base.size;
482 ++dpy_count;
483 }
484
Chris Wilsona4f5ea62016-10-28 13:58:35 +0100485 if (obj->mm.madv == I915_MADV_DONTNEED) {
Chris Wilson2bd160a2016-08-15 10:48:45 +0100486 purgeable_size += obj->base.size;
487 ++purgeable_count;
488 }
489
Chris Wilsona4f5ea62016-10-28 13:58:35 +0100490 if (obj->mm.mapping) {
Chris Wilson2bd160a2016-08-15 10:48:45 +0100491 mapped_count++;
492 mapped_size += obj->base.size;
493 }
Matthew Auld7393b7e2017-10-06 23:18:28 +0100494
495 if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
496 huge_count++;
497 huge_size += obj->base.size;
498 page_sizes |= obj->mm.page_sizes.sg;
499 }
Chris Wilson2bd160a2016-08-15 10:48:45 +0100500 }
Chris Wilsonf2123812017-10-16 12:40:37 +0100501 spin_unlock(&dev_priv->mm.obj_lock);
502
Chris Wilson2bd160a2016-08-15 10:48:45 +0100503 seq_printf(m, "%u bound objects, %llu bytes\n",
504 count, size);
Mika Kuoppalac44ef602015-06-25 18:35:05 +0300505 seq_printf(m, "%u purgeable objects, %llu bytes\n",
Chris Wilsonb7abb712012-08-20 11:33:30 +0200506 purgeable_count, purgeable_size);
Chris Wilson2bd160a2016-08-15 10:48:45 +0100507 seq_printf(m, "%u mapped objects, %llu bytes\n",
508 mapped_count, mapped_size);
Matthew Auld7393b7e2017-10-06 23:18:28 +0100509 seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
510 huge_count,
511 stringify_page_sizes(page_sizes, buf, sizeof(buf)),
512 huge_size);
Chris Wilsonbd3d2252017-10-13 21:26:14 +0100513 seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
Chris Wilson2bd160a2016-08-15 10:48:45 +0100514 dpy_count, dpy_size);
Chris Wilson6299f992010-11-24 12:23:44 +0000515
Matthew Auldb7128ef2017-12-11 15:18:22 +0000516 seq_printf(m, "%llu [%pa] gtt total\n",
Chris Wilson82ad6442018-06-05 16:37:58 +0100517 ggtt->vm.total, &ggtt->mappable_end);
Matthew Auld7393b7e2017-10-06 23:18:28 +0100518 seq_printf(m, "Supported page sizes: %s\n",
519 stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
520 buf, sizeof(buf)));
Chris Wilson73aa8082010-09-30 11:46:12 +0100521
Damien Lespiau267f0c92013-06-24 22:59:48 +0100522 seq_putc(m, '\n');
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000523
524 ret = mutex_lock_interruptible(&dev->struct_mutex);
525 if (ret)
526 return ret;
527
Brad Volkin493018d2014-12-11 12:13:08 -0800528 print_batch_pool_stats(m, dev_priv);
Chris Wilson15da9562016-05-24 14:53:43 +0100529 print_context_stats(m, dev_priv);
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000530 mutex_unlock(&dev->struct_mutex);
Chris Wilson73aa8082010-09-30 11:46:12 +0100531
532 return 0;
533}
534
Damien Lespiauaee56cf2013-06-24 22:59:49 +0100535static int i915_gem_gtt_info(struct seq_file *m, void *data)
Chris Wilson08c18322011-01-10 00:00:24 +0000536{
Damien Lespiau9f25d002014-05-13 15:30:28 +0100537 struct drm_info_node *node = m->private;
David Weinehall36cdd012016-08-22 13:59:31 +0300538 struct drm_i915_private *dev_priv = node_to_i915(node);
539 struct drm_device *dev = &dev_priv->drm;
Chris Wilsonf2123812017-10-16 12:40:37 +0100540 struct drm_i915_gem_object **objects;
Chris Wilson08c18322011-01-10 00:00:24 +0000541 struct drm_i915_gem_object *obj;
Mika Kuoppalac44ef602015-06-25 18:35:05 +0300542 u64 total_obj_size, total_gtt_size;
Chris Wilsonf2123812017-10-16 12:40:37 +0100543 unsigned long nobject, n;
Chris Wilson08c18322011-01-10 00:00:24 +0000544 int count, ret;
545
Chris Wilsonf2123812017-10-16 12:40:37 +0100546 nobject = READ_ONCE(dev_priv->mm.object_count);
547 objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
548 if (!objects)
549 return -ENOMEM;
550
Chris Wilson08c18322011-01-10 00:00:24 +0000551 ret = mutex_lock_interruptible(&dev->struct_mutex);
552 if (ret)
553 return ret;
554
Chris Wilsonf2123812017-10-16 12:40:37 +0100555 count = 0;
556 spin_lock(&dev_priv->mm.obj_lock);
557 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
558 objects[count++] = obj;
559 if (count == nobject)
560 break;
561 }
562 spin_unlock(&dev_priv->mm.obj_lock);
563
564 total_obj_size = total_gtt_size = 0;
565 for (n = 0; n < count; n++) {
566 obj = objects[n];
567
Damien Lespiau267f0c92013-06-24 22:59:48 +0100568 seq_puts(m, " ");
Chris Wilson08c18322011-01-10 00:00:24 +0000569 describe_obj(m, obj);
Damien Lespiau267f0c92013-06-24 22:59:48 +0100570 seq_putc(m, '\n');
Chris Wilson08c18322011-01-10 00:00:24 +0000571 total_obj_size += obj->base.size;
Tvrtko Ursulinca1543b2015-07-01 11:51:10 +0100572 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
Chris Wilson08c18322011-01-10 00:00:24 +0000573 }
574
575 mutex_unlock(&dev->struct_mutex);
576
Mika Kuoppalac44ef602015-06-25 18:35:05 +0300577 seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
Chris Wilson08c18322011-01-10 00:00:24 +0000578 count, total_obj_size, total_gtt_size);
Chris Wilsonf2123812017-10-16 12:40:37 +0100579 kvfree(objects);
Chris Wilson08c18322011-01-10 00:00:24 +0000580
581 return 0;
582}
583
Brad Volkin493018d2014-12-11 12:13:08 -0800584static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
585{
David Weinehall36cdd012016-08-22 13:59:31 +0300586 struct drm_i915_private *dev_priv = node_to_i915(m->private);
587 struct drm_device *dev = &dev_priv->drm;
Brad Volkin493018d2014-12-11 12:13:08 -0800588 struct drm_i915_gem_object *obj;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000589 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +0530590 enum intel_engine_id id;
Chris Wilson8d9d5742015-04-07 16:20:38 +0100591 int total = 0;
Dave Gordonb4ac5af2016-03-24 11:20:38 +0000592 int ret, j;
Brad Volkin493018d2014-12-11 12:13:08 -0800593
594 ret = mutex_lock_interruptible(&dev->struct_mutex);
595 if (ret)
596 return ret;
597
Akash Goel3b3f1652016-10-13 22:44:48 +0530598 for_each_engine(engine, dev_priv, id) {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000599 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
Chris Wilson8d9d5742015-04-07 16:20:38 +0100600 int count;
601
602 count = 0;
603 list_for_each_entry(obj,
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000604 &engine->batch_pool.cache_list[j],
Chris Wilson8d9d5742015-04-07 16:20:38 +0100605 batch_pool_link)
606 count++;
607 seq_printf(m, "%s cache[%d]: %d objects\n",
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000608 engine->name, j, count);
Chris Wilson8d9d5742015-04-07 16:20:38 +0100609
610 list_for_each_entry(obj,
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000611 &engine->batch_pool.cache_list[j],
Chris Wilson8d9d5742015-04-07 16:20:38 +0100612 batch_pool_link) {
613 seq_puts(m, " ");
614 describe_obj(m, obj);
615 seq_putc(m, '\n');
616 }
617
618 total += count;
Chris Wilson06fbca72015-04-07 16:20:36 +0100619 }
Brad Volkin493018d2014-12-11 12:13:08 -0800620 }
621
Chris Wilson8d9d5742015-04-07 16:20:38 +0100622 seq_printf(m, "total: %d\n", total);
Brad Volkin493018d2014-12-11 12:13:08 -0800623
624 mutex_unlock(&dev->struct_mutex);
625
626 return 0;
627}
628
/*
 * gen8_display_interrupt_info - dump gen8+ display interrupt registers.
 *
 * Prints IMR/IIR/IER for each display pipe (skipping pipes whose power
 * well is down), then the DE port, DE misc and PCU interrupt registers.
 * Caller is expected to hold a runtime-pm wakeref for the register reads.
 */
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;

		/* Reading a powered-down pipe's registers would fault. */
		power_domain = POWER_DOMAIN_PIPE(pipe);
		wakeref = intel_display_power_get_if_enabled(dev_priv,
							     power_domain);
		if (!wakeref) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain, wakeref);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}
680
Ben Gamari20172632009-02-17 20:08:50 -0500681static int i915_interrupt_info(struct seq_file *m, void *data)
682{
David Weinehall36cdd012016-08-22 13:59:31 +0300683 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000684 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +0530685 enum intel_engine_id id;
Chris Wilsona0371212019-01-14 14:21:14 +0000686 intel_wakeref_t wakeref;
Chris Wilson4bb05042016-09-03 07:53:43 +0100687 int i, pipe;
Chris Wilsonde227ef2010-07-03 07:58:38 +0100688
Chris Wilsona0371212019-01-14 14:21:14 +0000689 wakeref = intel_runtime_pm_get(dev_priv);
Ben Gamari20172632009-02-17 20:08:50 -0500690
David Weinehall36cdd012016-08-22 13:59:31 +0300691 if (IS_CHERRYVIEW(dev_priv)) {
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000692 intel_wakeref_t pref;
693
Ville Syrjälä74e1ca82014-04-09 13:28:09 +0300694 seq_printf(m, "Master Interrupt Control:\t%08x\n",
695 I915_READ(GEN8_MASTER_IRQ));
696
697 seq_printf(m, "Display IER:\t%08x\n",
698 I915_READ(VLV_IER));
699 seq_printf(m, "Display IIR:\t%08x\n",
700 I915_READ(VLV_IIR));
701 seq_printf(m, "Display IIR_RW:\t%08x\n",
702 I915_READ(VLV_IIR_RW));
703 seq_printf(m, "Display IMR:\t%08x\n",
704 I915_READ(VLV_IMR));
Chris Wilson9c870d02016-10-24 13:42:15 +0100705 for_each_pipe(dev_priv, pipe) {
706 enum intel_display_power_domain power_domain;
707
708 power_domain = POWER_DOMAIN_PIPE(pipe);
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000709 pref = intel_display_power_get_if_enabled(dev_priv,
710 power_domain);
711 if (!pref) {
Chris Wilson9c870d02016-10-24 13:42:15 +0100712 seq_printf(m, "Pipe %c power disabled\n",
713 pipe_name(pipe));
714 continue;
715 }
716
Ville Syrjälä74e1ca82014-04-09 13:28:09 +0300717 seq_printf(m, "Pipe %c stat:\t%08x\n",
718 pipe_name(pipe),
719 I915_READ(PIPESTAT(pipe)));
720
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000721 intel_display_power_put(dev_priv, power_domain, pref);
Chris Wilson9c870d02016-10-24 13:42:15 +0100722 }
723
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000724 pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
Ville Syrjälä74e1ca82014-04-09 13:28:09 +0300725 seq_printf(m, "Port hotplug:\t%08x\n",
726 I915_READ(PORT_HOTPLUG_EN));
727 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
728 I915_READ(VLV_DPFLIPSTAT));
729 seq_printf(m, "DPINVGTT:\t%08x\n",
730 I915_READ(DPINVGTT));
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000731 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
Ville Syrjälä74e1ca82014-04-09 13:28:09 +0300732
733 for (i = 0; i < 4; i++) {
734 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
735 i, I915_READ(GEN8_GT_IMR(i)));
736 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
737 i, I915_READ(GEN8_GT_IIR(i)));
738 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
739 i, I915_READ(GEN8_GT_IER(i)));
740 }
741
742 seq_printf(m, "PCU interrupt mask:\t%08x\n",
743 I915_READ(GEN8_PCU_IMR));
744 seq_printf(m, "PCU interrupt identity:\t%08x\n",
745 I915_READ(GEN8_PCU_IIR));
746 seq_printf(m, "PCU interrupt enable:\t%08x\n",
747 I915_READ(GEN8_PCU_IER));
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200748 } else if (INTEL_GEN(dev_priv) >= 11) {
749 seq_printf(m, "Master Interrupt Control: %08x\n",
750 I915_READ(GEN11_GFX_MSTR_IRQ));
751
752 seq_printf(m, "Render/Copy Intr Enable: %08x\n",
753 I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
754 seq_printf(m, "VCS/VECS Intr Enable: %08x\n",
755 I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
756 seq_printf(m, "GUC/SG Intr Enable:\t %08x\n",
757 I915_READ(GEN11_GUC_SG_INTR_ENABLE));
758 seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
759 I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
760 seq_printf(m, "Crypto Intr Enable:\t %08x\n",
761 I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
762 seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n",
763 I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
764
765 seq_printf(m, "Display Interrupt Control:\t%08x\n",
766 I915_READ(GEN11_DISPLAY_INT_CTL));
767
768 gen8_display_interrupt_info(m);
David Weinehall36cdd012016-08-22 13:59:31 +0300769 } else if (INTEL_GEN(dev_priv) >= 8) {
Ben Widawskya123f152013-11-02 21:07:10 -0700770 seq_printf(m, "Master Interrupt Control:\t%08x\n",
771 I915_READ(GEN8_MASTER_IRQ));
772
773 for (i = 0; i < 4; i++) {
774 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
775 i, I915_READ(GEN8_GT_IMR(i)));
776 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
777 i, I915_READ(GEN8_GT_IIR(i)));
778 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
779 i, I915_READ(GEN8_GT_IER(i)));
780 }
781
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200782 gen8_display_interrupt_info(m);
David Weinehall36cdd012016-08-22 13:59:31 +0300783 } else if (IS_VALLEYVIEW(dev_priv)) {
Jesse Barnes7e231dbe2012-03-28 13:39:38 -0700784 seq_printf(m, "Display IER:\t%08x\n",
785 I915_READ(VLV_IER));
786 seq_printf(m, "Display IIR:\t%08x\n",
787 I915_READ(VLV_IIR));
788 seq_printf(m, "Display IIR_RW:\t%08x\n",
789 I915_READ(VLV_IIR_RW));
790 seq_printf(m, "Display IMR:\t%08x\n",
791 I915_READ(VLV_IMR));
Chris Wilson4f4631a2017-02-10 13:36:32 +0000792 for_each_pipe(dev_priv, pipe) {
793 enum intel_display_power_domain power_domain;
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000794 intel_wakeref_t pref;
Chris Wilson4f4631a2017-02-10 13:36:32 +0000795
796 power_domain = POWER_DOMAIN_PIPE(pipe);
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000797 pref = intel_display_power_get_if_enabled(dev_priv,
798 power_domain);
799 if (!pref) {
Chris Wilson4f4631a2017-02-10 13:36:32 +0000800 seq_printf(m, "Pipe %c power disabled\n",
801 pipe_name(pipe));
802 continue;
803 }
804
Jesse Barnes7e231dbe2012-03-28 13:39:38 -0700805 seq_printf(m, "Pipe %c stat:\t%08x\n",
806 pipe_name(pipe),
807 I915_READ(PIPESTAT(pipe)));
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000808 intel_display_power_put(dev_priv, power_domain, pref);
Chris Wilson4f4631a2017-02-10 13:36:32 +0000809 }
Jesse Barnes7e231dbe2012-03-28 13:39:38 -0700810
811 seq_printf(m, "Master IER:\t%08x\n",
812 I915_READ(VLV_MASTER_IER));
813
814 seq_printf(m, "Render IER:\t%08x\n",
815 I915_READ(GTIER));
816 seq_printf(m, "Render IIR:\t%08x\n",
817 I915_READ(GTIIR));
818 seq_printf(m, "Render IMR:\t%08x\n",
819 I915_READ(GTIMR));
820
821 seq_printf(m, "PM IER:\t\t%08x\n",
822 I915_READ(GEN6_PMIER));
823 seq_printf(m, "PM IIR:\t\t%08x\n",
824 I915_READ(GEN6_PMIIR));
825 seq_printf(m, "PM IMR:\t\t%08x\n",
826 I915_READ(GEN6_PMIMR));
827
828 seq_printf(m, "Port hotplug:\t%08x\n",
829 I915_READ(PORT_HOTPLUG_EN));
830 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
831 I915_READ(VLV_DPFLIPSTAT));
832 seq_printf(m, "DPINVGTT:\t%08x\n",
833 I915_READ(DPINVGTT));
834
David Weinehall36cdd012016-08-22 13:59:31 +0300835 } else if (!HAS_PCH_SPLIT(dev_priv)) {
Zhenyu Wang5f6a1692009-08-10 21:37:24 +0800836 seq_printf(m, "Interrupt enable: %08x\n",
Paulo Zanoni9d9523d2019-04-10 16:53:42 -0700837 I915_READ(GEN2_IER));
Zhenyu Wang5f6a1692009-08-10 21:37:24 +0800838 seq_printf(m, "Interrupt identity: %08x\n",
Paulo Zanoni9d9523d2019-04-10 16:53:42 -0700839 I915_READ(GEN2_IIR));
Zhenyu Wang5f6a1692009-08-10 21:37:24 +0800840 seq_printf(m, "Interrupt mask: %08x\n",
Paulo Zanoni9d9523d2019-04-10 16:53:42 -0700841 I915_READ(GEN2_IMR));
Damien Lespiau055e3932014-08-18 13:49:10 +0100842 for_each_pipe(dev_priv, pipe)
Jesse Barnes9db4a9c2011-02-07 12:26:52 -0800843 seq_printf(m, "Pipe %c stat: %08x\n",
844 pipe_name(pipe),
845 I915_READ(PIPESTAT(pipe)));
Zhenyu Wang5f6a1692009-08-10 21:37:24 +0800846 } else {
847 seq_printf(m, "North Display Interrupt enable: %08x\n",
848 I915_READ(DEIER));
849 seq_printf(m, "North Display Interrupt identity: %08x\n",
850 I915_READ(DEIIR));
851 seq_printf(m, "North Display Interrupt mask: %08x\n",
852 I915_READ(DEIMR));
853 seq_printf(m, "South Display Interrupt enable: %08x\n",
854 I915_READ(SDEIER));
855 seq_printf(m, "South Display Interrupt identity: %08x\n",
856 I915_READ(SDEIIR));
857 seq_printf(m, "South Display Interrupt mask: %08x\n",
858 I915_READ(SDEIMR));
859 seq_printf(m, "Graphics Interrupt enable: %08x\n",
860 I915_READ(GTIER));
861 seq_printf(m, "Graphics Interrupt identity: %08x\n",
862 I915_READ(GTIIR));
863 seq_printf(m, "Graphics Interrupt mask: %08x\n",
864 I915_READ(GTIMR));
865 }
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200866
867 if (INTEL_GEN(dev_priv) >= 11) {
868 seq_printf(m, "RCS Intr Mask:\t %08x\n",
869 I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
870 seq_printf(m, "BCS Intr Mask:\t %08x\n",
871 I915_READ(GEN11_BCS_RSVD_INTR_MASK));
872 seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
873 I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
874 seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
875 I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
876 seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
877 I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
878 seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
879 I915_READ(GEN11_GUC_SG_INTR_MASK));
880 seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
881 I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
882 seq_printf(m, "Crypto Intr Mask:\t %08x\n",
883 I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
884 seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
885 I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
886
887 } else if (INTEL_GEN(dev_priv) >= 6) {
Chris Wilsond5acadf2017-12-09 10:44:18 +0000888 for_each_engine(engine, dev_priv, id) {
Chris Wilsona2c7f6f2012-09-01 20:51:22 +0100889 seq_printf(m,
890 "Graphics Interrupt mask (%s): %08x\n",
Daniele Ceraolo Spuriobaba6e52019-03-25 14:49:40 -0700891 engine->name, ENGINE_READ(engine, RING_IMR));
Chris Wilson9862e602011-01-04 22:22:17 +0000892 }
Chris Wilson9862e602011-01-04 22:22:17 +0000893 }
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200894
Chris Wilsona0371212019-01-14 14:21:14 +0000895 intel_runtime_pm_put(dev_priv, wakeref);
Chris Wilsonde227ef2010-07-03 07:58:38 +0100896
Ben Gamari20172632009-02-17 20:08:50 -0500897 return 0;
898}
899
Chris Wilsona6172a82009-02-11 14:26:38 +0000900static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
901{
David Weinehall36cdd012016-08-22 13:59:31 +0300902 struct drm_i915_private *dev_priv = node_to_i915(m->private);
903 struct drm_device *dev = &dev_priv->drm;
Chris Wilsonde227ef2010-07-03 07:58:38 +0100904 int i, ret;
905
906 ret = mutex_lock_interruptible(&dev->struct_mutex);
907 if (ret)
908 return ret;
Chris Wilsona6172a82009-02-11 14:26:38 +0000909
Chris Wilsona6172a82009-02-11 14:26:38 +0000910 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
911 for (i = 0; i < dev_priv->num_fence_regs; i++) {
Chris Wilson49ef5292016-08-18 17:17:00 +0100912 struct i915_vma *vma = dev_priv->fence_regs[i].vma;
Chris Wilsona6172a82009-02-11 14:26:38 +0000913
Chris Wilson6c085a72012-08-20 11:40:46 +0200914 seq_printf(m, "Fence %d, pin count = %d, object = ",
915 i, dev_priv->fence_regs[i].pin_count);
Chris Wilson49ef5292016-08-18 17:17:00 +0100916 if (!vma)
Damien Lespiau267f0c92013-06-24 22:59:48 +0100917 seq_puts(m, "unused");
Chris Wilsonc2c347a92010-10-27 15:11:53 +0100918 else
Chris Wilson49ef5292016-08-18 17:17:00 +0100919 describe_obj(m, vma->obj);
Damien Lespiau267f0c92013-06-24 22:59:48 +0100920 seq_putc(m, '\n');
Chris Wilsona6172a82009-02-11 14:26:38 +0000921 }
922
Chris Wilson05394f32010-11-08 19:18:58 +0000923 mutex_unlock(&dev->struct_mutex);
Chris Wilsona6172a82009-02-11 14:26:38 +0000924 return 0;
925}
926
Chris Wilson98a2f412016-10-12 10:05:18 +0100927#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000928static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
929 size_t count, loff_t *pos)
930{
Chris Wilson0e390372018-11-23 13:23:25 +0000931 struct i915_gpu_state *error;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000932 ssize_t ret;
Chris Wilson0e390372018-11-23 13:23:25 +0000933 void *buf;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000934
Chris Wilson0e390372018-11-23 13:23:25 +0000935 error = file->private_data;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000936 if (!error)
937 return 0;
938
Chris Wilson0e390372018-11-23 13:23:25 +0000939 /* Bounce buffer required because of kernfs __user API convenience. */
940 buf = kmalloc(count, GFP_KERNEL);
941 if (!buf)
942 return -ENOMEM;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000943
Chris Wilson0e390372018-11-23 13:23:25 +0000944 ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
945 if (ret <= 0)
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000946 goto out;
947
Chris Wilson0e390372018-11-23 13:23:25 +0000948 if (!copy_to_user(ubuf, buf, ret))
949 *pos += ret;
950 else
951 ret = -EFAULT;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000952
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000953out:
Chris Wilson0e390372018-11-23 13:23:25 +0000954 kfree(buf);
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000955 return ret;
956}
957
/*
 * Release handler shared by the gpu_info and error_state nodes: drop
 * the reference on the GPU state snapshot attached at open time.
 */
static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_state_put(file->private_data);
	return 0;
}
963
/*
 * Open handler for the i915_gpu_info node: capture a snapshot of the
 * current GPU state under a runtime-pm wakeref and attach it to the
 * file for gpu_state_read()/gpu_state_release().
 *
 * NOTE(review): if the capture yields NULL (not an ERR_PTR),
 * private_data stays NULL and gpu_state_read() then returns EOF.
 */
static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_state *gpu;
	intel_wakeref_t wakeref;

	gpu = NULL;
	with_intel_runtime_pm(i915, wakeref)
		gpu = i915_capture_gpu_state(i915);
	if (IS_ERR(gpu))
		return PTR_ERR(gpu);

	/* Reference handed to the file; dropped in gpu_state_release(). */
	file->private_data = gpu;
	return 0;
}
979
/*
 * debugfs "i915_gpu_info": captures a fresh GPU state snapshot on open
 * and streams it through the shared gpu_state_read()/release handlers.
 */
static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
Chris Wilson98a2f412016-10-12 10:05:18 +0100987
Daniel Vetterd5442302012-04-27 15:17:40 +0200988static ssize_t
989i915_error_state_write(struct file *filp,
990 const char __user *ubuf,
991 size_t cnt,
992 loff_t *ppos)
993{
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000994 struct i915_gpu_state *error = filp->private_data;
995
996 if (!error)
997 return 0;
Daniel Vetterd5442302012-04-27 15:17:40 +0200998
999 DRM_DEBUG_DRIVER("Resetting error state\n");
Chris Wilson5a4c6f12017-02-14 16:46:11 +00001000 i915_reset_error_state(error->i915);
Daniel Vetterd5442302012-04-27 15:17:40 +02001001
1002 return cnt;
1003}
1004
1005static int i915_error_state_open(struct inode *inode, struct file *file)
1006{
Chris Wilsone6154e42018-12-07 11:05:54 +00001007 struct i915_gpu_state *error;
1008
1009 error = i915_first_error_state(inode->i_private);
1010 if (IS_ERR(error))
1011 return PTR_ERR(error);
1012
1013 file->private_data = error;
Mika Kuoppalaedc3d882013-05-23 13:55:35 +03001014 return 0;
Daniel Vetterd5442302012-04-27 15:17:40 +02001015}
1016
Daniel Vetterd5442302012-04-27 15:17:40 +02001017static const struct file_operations i915_error_state_fops = {
1018 .owner = THIS_MODULE,
1019 .open = i915_error_state_open,
Chris Wilson5a4c6f12017-02-14 16:46:11 +00001020 .read = gpu_state_read,
Daniel Vetterd5442302012-04-27 15:17:40 +02001021 .write = i915_error_state_write,
1022 .llseek = default_llseek,
Chris Wilson5a4c6f12017-02-14 16:46:11 +00001023 .release = gpu_state_release,
Daniel Vetterd5442302012-04-27 15:17:40 +02001024};
Chris Wilson98a2f412016-10-12 10:05:18 +01001025#endif
1026
Deepak Sadb4bd12014-03-31 11:30:02 +05301027static int i915_frequency_info(struct seq_file *m, void *unused)
Jesse Barnesf97108d2010-01-29 11:27:07 -08001028{
David Weinehall36cdd012016-08-22 13:59:31 +03001029 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001030 struct intel_rps *rps = &dev_priv->gt_pm.rps;
Chris Wilsona0371212019-01-14 14:21:14 +00001031 intel_wakeref_t wakeref;
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02001032 int ret = 0;
1033
Chris Wilsona0371212019-01-14 14:21:14 +00001034 wakeref = intel_runtime_pm_get(dev_priv);
Jesse Barnesf97108d2010-01-29 11:27:07 -08001035
Lucas De Marchicf819ef2018-12-12 10:10:43 -08001036 if (IS_GEN(dev_priv, 5)) {
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001037 u16 rgvswctl = I915_READ16(MEMSWCTL);
1038 u16 rgvstat = I915_READ16(MEMSTAT_ILK);
1039
1040 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
1041 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
1042 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
1043 MEMSTAT_VID_SHIFT);
1044 seq_printf(m, "Current P-state: %d\n",
1045 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
David Weinehall36cdd012016-08-22 13:59:31 +03001046 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
Sagar Arun Kamble0d6fc922017-10-10 22:30:02 +01001047 u32 rpmodectl, freq_sts;
Wayne Boyer666a4532015-12-09 12:29:35 -08001048
Sagar Arun Kamble9f817502017-10-10 22:30:05 +01001049 mutex_lock(&dev_priv->pcu_lock);
Sagar Arun Kamble0d6fc922017-10-10 22:30:02 +01001050
1051 rpmodectl = I915_READ(GEN6_RP_CONTROL);
1052 seq_printf(m, "Video Turbo Mode: %s\n",
1053 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1054 seq_printf(m, "HW control enabled: %s\n",
1055 yesno(rpmodectl & GEN6_RP_ENABLE));
1056 seq_printf(m, "SW control enabled: %s\n",
1057 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1058 GEN6_RP_MEDIA_SW_MODE));
1059
Wayne Boyer666a4532015-12-09 12:29:35 -08001060 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
1061 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
1062 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
1063
1064 seq_printf(m, "actual GPU freq: %d MHz\n",
1065 intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
1066
1067 seq_printf(m, "current GPU freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001068 intel_gpu_freq(dev_priv, rps->cur_freq));
Wayne Boyer666a4532015-12-09 12:29:35 -08001069
1070 seq_printf(m, "max GPU freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001071 intel_gpu_freq(dev_priv, rps->max_freq));
Wayne Boyer666a4532015-12-09 12:29:35 -08001072
1073 seq_printf(m, "min GPU freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001074 intel_gpu_freq(dev_priv, rps->min_freq));
Wayne Boyer666a4532015-12-09 12:29:35 -08001075
1076 seq_printf(m, "idle GPU freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001077 intel_gpu_freq(dev_priv, rps->idle_freq));
Wayne Boyer666a4532015-12-09 12:29:35 -08001078
1079 seq_printf(m,
1080 "efficient (RPe) frequency: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001081 intel_gpu_freq(dev_priv, rps->efficient_freq));
Sagar Arun Kamble9f817502017-10-10 22:30:05 +01001082 mutex_unlock(&dev_priv->pcu_lock);
David Weinehall36cdd012016-08-22 13:59:31 +03001083 } else if (INTEL_GEN(dev_priv) >= 6) {
Bob Paauwe35040562015-06-25 14:54:07 -07001084 u32 rp_state_limits;
1085 u32 gt_perf_status;
1086 u32 rp_state_cap;
Chris Wilson0d8f9492014-03-27 09:06:14 +00001087 u32 rpmodectl, rpinclimit, rpdeclimit;
Chris Wilson8e8c06c2013-08-26 19:51:01 -03001088 u32 rpstat, cagf, reqf;
Jesse Barnesccab5c82011-01-18 15:49:25 -08001089 u32 rpupei, rpcurup, rpprevup;
1090 u32 rpdownei, rpcurdown, rpprevdown;
Paulo Zanoni9dd3c602014-08-01 18:14:48 -03001091 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001092 int max_freq;
1093
Bob Paauwe35040562015-06-25 14:54:07 -07001094 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02001095 if (IS_GEN9_LP(dev_priv)) {
Bob Paauwe35040562015-06-25 14:54:07 -07001096 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
1097 gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
1098 } else {
1099 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
1100 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
1101 }
1102
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001103 /* RPSTAT1 is in the GT power well */
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07001104 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001105
Chris Wilson8e8c06c2013-08-26 19:51:01 -03001106 reqf = I915_READ(GEN6_RPNSWREQ);
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001107 if (INTEL_GEN(dev_priv) >= 9)
Akash Goel60260a52015-03-06 11:07:21 +05301108 reqf >>= 23;
1109 else {
1110 reqf &= ~GEN6_TURBO_DISABLE;
David Weinehall36cdd012016-08-22 13:59:31 +03001111 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
Akash Goel60260a52015-03-06 11:07:21 +05301112 reqf >>= 24;
1113 else
1114 reqf >>= 25;
1115 }
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001116 reqf = intel_gpu_freq(dev_priv, reqf);
Chris Wilson8e8c06c2013-08-26 19:51:01 -03001117
Chris Wilson0d8f9492014-03-27 09:06:14 +00001118 rpmodectl = I915_READ(GEN6_RP_CONTROL);
1119 rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
1120 rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
1121
Jesse Barnesccab5c82011-01-18 15:49:25 -08001122 rpstat = I915_READ(GEN6_RPSTAT1);
Akash Goeld6cda9c2016-04-23 00:05:46 +05301123 rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
1124 rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
1125 rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
1126 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
1127 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
1128 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
Tvrtko Ursulinc84b2702017-11-21 18:18:44 +00001129 cagf = intel_gpu_freq(dev_priv,
1130 intel_get_cagf(dev_priv, rpstat));
Jesse Barnesccab5c82011-01-18 15:49:25 -08001131
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07001132 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
Ben Widawskyd1ebd8162011-04-25 20:11:50 +01001133
Oscar Mateo6b7a6a72018-05-10 14:59:55 -07001134 if (INTEL_GEN(dev_priv) >= 11) {
1135 pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
1136 pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
1137 /*
1138 * The equivalent to the PM ISR & IIR cannot be read
1139 * without affecting the current state of the system
1140 */
1141 pm_isr = 0;
1142 pm_iir = 0;
1143 } else if (INTEL_GEN(dev_priv) >= 8) {
Paulo Zanoni9dd3c602014-08-01 18:14:48 -03001144 pm_ier = I915_READ(GEN8_GT_IER(2));
1145 pm_imr = I915_READ(GEN8_GT_IMR(2));
1146 pm_isr = I915_READ(GEN8_GT_ISR(2));
1147 pm_iir = I915_READ(GEN8_GT_IIR(2));
Oscar Mateo6b7a6a72018-05-10 14:59:55 -07001148 } else {
1149 pm_ier = I915_READ(GEN6_PMIER);
1150 pm_imr = I915_READ(GEN6_PMIMR);
1151 pm_isr = I915_READ(GEN6_PMISR);
1152 pm_iir = I915_READ(GEN6_PMIIR);
Paulo Zanoni9dd3c602014-08-01 18:14:48 -03001153 }
Oscar Mateo6b7a6a72018-05-10 14:59:55 -07001154 pm_mask = I915_READ(GEN6_PMINTRMSK);
1155
Sagar Arun Kamble960e5462017-10-10 22:29:59 +01001156 seq_printf(m, "Video Turbo Mode: %s\n",
1157 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1158 seq_printf(m, "HW control enabled: %s\n",
1159 yesno(rpmodectl & GEN6_RP_ENABLE));
1160 seq_printf(m, "SW control enabled: %s\n",
1161 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1162 GEN6_RP_MEDIA_SW_MODE));
Oscar Mateo6b7a6a72018-05-10 14:59:55 -07001163
1164 seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
1165 pm_ier, pm_imr, pm_mask);
1166 if (INTEL_GEN(dev_priv) <= 10)
1167 seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
1168 pm_isr, pm_iir);
Sagar Arun Kamble5dd04552017-03-11 08:07:00 +05301169 seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001170 rps->pm_intrmsk_mbz);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001171 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001172 seq_printf(m, "Render p-state ratio: %d\n",
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001173 (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001174 seq_printf(m, "Render p-state VID: %d\n",
1175 gt_perf_status & 0xff);
1176 seq_printf(m, "Render p-state limit: %d\n",
1177 rp_state_limits & 0xff);
Chris Wilson0d8f9492014-03-27 09:06:14 +00001178 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
1179 seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
1180 seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
1181 seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
Chris Wilson8e8c06c2013-08-26 19:51:01 -03001182 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
Ben Widawskyf82855d2013-01-29 12:00:15 -08001183 seq_printf(m, "CAGF: %dMHz\n", cagf);
Akash Goeld6cda9c2016-04-23 00:05:46 +05301184 seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
1185 rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
1186 seq_printf(m, "RP CUR UP: %d (%dus)\n",
1187 rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
1188 seq_printf(m, "RP PREV UP: %d (%dus)\n",
1189 rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
Chris Wilson60548c52018-07-31 14:26:29 +01001190 seq_printf(m, "Up threshold: %d%%\n",
1191 rps->power.up_threshold);
Chris Wilsond86ed342015-04-27 13:41:19 +01001192
Akash Goeld6cda9c2016-04-23 00:05:46 +05301193 seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
1194 rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
1195 seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
1196 rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
1197 seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
1198 rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
Chris Wilson60548c52018-07-31 14:26:29 +01001199 seq_printf(m, "Down threshold: %d%%\n",
1200 rps->power.down_threshold);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001201
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02001202 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
Bob Paauwe35040562015-06-25 14:54:07 -07001203 rp_state_cap >> 16) & 0xff;
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001204 max_freq *= (IS_GEN9_BC(dev_priv) ||
Oscar Mateo2b2874e2018-04-05 17:00:52 +03001205 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001206 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001207 intel_gpu_freq(dev_priv, max_freq));
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001208
1209 max_freq = (rp_state_cap & 0xff00) >> 8;
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001210 max_freq *= (IS_GEN9_BC(dev_priv) ||
Oscar Mateo2b2874e2018-04-05 17:00:52 +03001211 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001212 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001213 intel_gpu_freq(dev_priv, max_freq));
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001214
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02001215 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
Bob Paauwe35040562015-06-25 14:54:07 -07001216 rp_state_cap >> 0) & 0xff;
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001217 max_freq *= (IS_GEN9_BC(dev_priv) ||
Oscar Mateo2b2874e2018-04-05 17:00:52 +03001218 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001219 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001220 intel_gpu_freq(dev_priv, max_freq));
Ben Widawsky31c77382013-04-05 14:29:22 -07001221 seq_printf(m, "Max overclocked frequency: %dMHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001222 intel_gpu_freq(dev_priv, rps->max_freq));
Chris Wilsonaed242f2015-03-18 09:48:21 +00001223
Chris Wilsond86ed342015-04-27 13:41:19 +01001224 seq_printf(m, "Current freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001225 intel_gpu_freq(dev_priv, rps->cur_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001226 seq_printf(m, "Actual freq: %d MHz\n", cagf);
Chris Wilsonaed242f2015-03-18 09:48:21 +00001227 seq_printf(m, "Idle freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001228 intel_gpu_freq(dev_priv, rps->idle_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001229 seq_printf(m, "Min freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001230 intel_gpu_freq(dev_priv, rps->min_freq));
Chris Wilson29ecd78d2016-07-13 09:10:35 +01001231 seq_printf(m, "Boost freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001232 intel_gpu_freq(dev_priv, rps->boost_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001233 seq_printf(m, "Max freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001234 intel_gpu_freq(dev_priv, rps->max_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001235 seq_printf(m,
1236 "efficient (RPe) frequency: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001237 intel_gpu_freq(dev_priv, rps->efficient_freq));
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001238 } else {
Damien Lespiau267f0c92013-06-24 22:59:48 +01001239 seq_puts(m, "no P-state info available\n");
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001240 }
Jesse Barnesf97108d2010-01-29 11:27:07 -08001241
Ville Syrjälä49cd97a2017-02-07 20:33:45 +02001242 seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
Mika Kahola1170f282015-09-25 14:00:32 +03001243 seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
1244 seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
1245
Chris Wilsona0371212019-01-14 14:21:14 +00001246 intel_runtime_pm_put(dev_priv, wakeref);
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02001247 return ret;
Jesse Barnesf97108d2010-01-29 11:27:07 -08001248}
1249
Ben Widawskyd6369512016-09-20 16:54:32 +03001250static void i915_instdone_info(struct drm_i915_private *dev_priv,
1251 struct seq_file *m,
1252 struct intel_instdone *instdone)
1253{
Ben Widawskyf9e61372016-09-20 16:54:33 +03001254 int slice;
1255 int subslice;
1256
Ben Widawskyd6369512016-09-20 16:54:32 +03001257 seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1258 instdone->instdone);
1259
1260 if (INTEL_GEN(dev_priv) <= 3)
1261 return;
1262
1263 seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1264 instdone->slice_common);
1265
1266 if (INTEL_GEN(dev_priv) <= 6)
1267 return;
1268
Ben Widawskyf9e61372016-09-20 16:54:33 +03001269 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1270 seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1271 slice, subslice, instdone->sampler[slice][subslice]);
1272
1273 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1274 seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1275 slice, subslice, instdone->row[slice][subslice]);
Ben Widawskyd6369512016-09-20 16:54:32 +03001276}
1277
/*
 * debugfs: dump the state of the GPU hangcheck machinery.
 *
 * Prints the reset flags, whether hangcheck is armed, and, per engine,
 * the seqno/ACTHD values last sampled by hangcheck next to the values
 * sampled live here, so a stuck engine shows up as "no progress" between
 * the two.  For the render engine the raw and hangcheck-accumulated
 * INSTDONE registers are dumped as well.
 */
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	intel_wakeref_t wakeref;
	enum intel_engine_id id;

	seq_printf(m, "Reset flags: %lx\n", dev_priv->gpu_error.flags);
	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "\tWedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "\tDevice (global) reset in progress\n");

	/* Nothing to compare against if hangcheck never runs. */
	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	/* Sample live hardware state; register access needs a wakeref. */
	with_intel_runtime_pm(dev_priv, wakeref) {
		for_each_engine(engine, dev_priv, id) {
			acthd[id] = intel_engine_get_active_head(engine);
			seqno[id] = intel_engine_get_hangcheck_seqno(engine);
		}

		intel_engine_get_instdone(dev_priv->engine[RCS0], &instdone);
	}

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s:\n", engine->name);
		/* hangcheck's last/next samples vs the live seqno read above */
		seq_printf(m, "\tseqno = %x [current %x, last %x], %dms ago\n",
			   engine->hangcheck.last_seqno,
			   seqno[id],
			   engine->hangcheck.next_seqno,
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);

		/* INSTDONE is only tracked for the render engine. */
		if (engine->id == RCS0) {
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}
1346
Michel Thierry061d06a2017-06-20 10:57:49 +01001347static int i915_reset_info(struct seq_file *m, void *unused)
1348{
1349 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1350 struct i915_gpu_error *error = &dev_priv->gpu_error;
1351 struct intel_engine_cs *engine;
1352 enum intel_engine_id id;
1353
1354 seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1355
1356 for_each_engine(engine, dev_priv, id) {
1357 seq_printf(m, "%s = %u\n", engine->name,
1358 i915_reset_engine_count(error, engine));
1359 }
1360
1361 return 0;
1362}
1363
Ben Widawsky4d855292011-12-12 19:34:16 -08001364static int ironlake_drpc_info(struct seq_file *m)
Jesse Barnesf97108d2010-01-29 11:27:07 -08001365{
David Weinehall36cdd012016-08-22 13:59:31 +03001366 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Ben Widawsky616fdb52011-10-05 11:44:54 -07001367 u32 rgvmodectl, rstdbyctl;
1368 u16 crstandvid;
Ben Widawsky616fdb52011-10-05 11:44:54 -07001369
Ben Widawsky616fdb52011-10-05 11:44:54 -07001370 rgvmodectl = I915_READ(MEMMODECTL);
1371 rstdbyctl = I915_READ(RSTDBYCTL);
1372 crstandvid = I915_READ16(CRSTANDVID);
1373
Jani Nikula742f4912015-09-03 11:16:09 +03001374 seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001375 seq_printf(m, "Boost freq: %d\n",
1376 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1377 MEMMODE_BOOST_FREQ_SHIFT);
1378 seq_printf(m, "HW control enabled: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001379 yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001380 seq_printf(m, "SW control enabled: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001381 yesno(rgvmodectl & MEMMODE_SWMODE_EN));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001382 seq_printf(m, "Gated voltage change: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001383 yesno(rgvmodectl & MEMMODE_RCLK_GATE));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001384 seq_printf(m, "Starting frequency: P%d\n",
1385 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
Jesse Barnes7648fa92010-05-20 14:28:11 -07001386 seq_printf(m, "Max P-state: P%d\n",
Jesse Barnesf97108d2010-01-29 11:27:07 -08001387 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
Jesse Barnes7648fa92010-05-20 14:28:11 -07001388 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1389 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1390 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1391 seq_printf(m, "Render standby enabled: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001392 yesno(!(rstdbyctl & RCX_SW_EXIT)));
Damien Lespiau267f0c92013-06-24 22:59:48 +01001393 seq_puts(m, "Current RS state: ");
Jesse Barnes88271da2011-01-05 12:01:24 -08001394 switch (rstdbyctl & RSX_STATUS_MASK) {
1395 case RSX_STATUS_ON:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001396 seq_puts(m, "on\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001397 break;
1398 case RSX_STATUS_RC1:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001399 seq_puts(m, "RC1\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001400 break;
1401 case RSX_STATUS_RC1E:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001402 seq_puts(m, "RC1E\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001403 break;
1404 case RSX_STATUS_RS1:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001405 seq_puts(m, "RS1\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001406 break;
1407 case RSX_STATUS_RS2:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001408 seq_puts(m, "RS2 (RC6)\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001409 break;
1410 case RSX_STATUS_RS3:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001411 seq_puts(m, "RC3 (RC6+)\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001412 break;
1413 default:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001414 seq_puts(m, "unknown\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001415 break;
1416 }
Jesse Barnesf97108d2010-01-29 11:27:07 -08001417
1418 return 0;
1419}
1420
Mika Kuoppalaf65367b2015-01-16 11:34:42 +02001421static int i915_forcewake_domains(struct seq_file *m, void *data)
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001422{
Chris Wilson233ebf52017-03-23 10:19:44 +00001423 struct drm_i915_private *i915 = node_to_i915(m->private);
Daniele Ceraolo Spuriof568eee2019-03-19 11:35:35 -07001424 struct intel_uncore *uncore = &i915->uncore;
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001425 struct intel_uncore_forcewake_domain *fw_domain;
Chris Wilsond2dc94b2017-03-23 10:19:41 +00001426 unsigned int tmp;
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001427
Chris Wilsond7a133d2017-09-07 14:44:41 +01001428 seq_printf(m, "user.bypass_count = %u\n",
Daniele Ceraolo Spuriof568eee2019-03-19 11:35:35 -07001429 uncore->user_forcewake.count);
Chris Wilsond7a133d2017-09-07 14:44:41 +01001430
Daniele Ceraolo Spuriof568eee2019-03-19 11:35:35 -07001431 for_each_fw_domain(fw_domain, uncore, tmp)
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001432 seq_printf(m, "%s.wake_count = %u\n",
Tvrtko Ursulin33c582c2016-04-07 17:04:33 +01001433 intel_uncore_forcewake_domain_to_str(fw_domain->id),
Chris Wilson233ebf52017-03-23 10:19:44 +00001434 READ_ONCE(fw_domain->wake_count));
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001435
1436 return 0;
1437}
1438
Mika Kuoppala13628772017-03-15 17:43:02 +02001439static void print_rc6_res(struct seq_file *m,
1440 const char *title,
1441 const i915_reg_t reg)
1442{
1443 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1444
1445 seq_printf(m, "%s %u (%llu us)\n",
1446 title, I915_READ(reg),
1447 intel_rc6_residency_us(dev_priv, reg));
1448}
1449
Deepak S669ab5a2014-01-10 15:18:26 +05301450static int vlv_drpc_info(struct seq_file *m)
1451{
David Weinehall36cdd012016-08-22 13:59:31 +03001452 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Sagar Arun Kamble0d6fc922017-10-10 22:30:02 +01001453 u32 rcctl1, pw_status;
Deepak S669ab5a2014-01-10 15:18:26 +05301454
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001455 pw_status = I915_READ(VLV_GTLC_PW_STATUS);
Deepak S669ab5a2014-01-10 15:18:26 +05301456 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1457
Deepak S669ab5a2014-01-10 15:18:26 +05301458 seq_printf(m, "RC6 Enabled: %s\n",
1459 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1460 GEN6_RC_CTL_EI_MODE(1))));
1461 seq_printf(m, "Render Power Well: %s\n",
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001462 (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
Deepak S669ab5a2014-01-10 15:18:26 +05301463 seq_printf(m, "Media Power Well: %s\n",
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001464 (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
Deepak S669ab5a2014-01-10 15:18:26 +05301465
Mika Kuoppala13628772017-03-15 17:43:02 +02001466 print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1467 print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
Imre Deak9cc19be2014-04-14 20:24:24 +03001468
Mika Kuoppalaf65367b2015-01-16 11:34:42 +02001469 return i915_forcewake_domains(m, NULL);
Deepak S669ab5a2014-01-10 15:18:26 +05301470}
1471
/*
 * Gen6+ DRPC state dump: RC-state enable bits from GEN6_RC_CONTROL,
 * gen9+ power-gating enables/status, the current RC state decoded from
 * GEN6_GT_CORE_STATUS, the RC6 residency counters, and (gen <= 7) the
 * RC6 voltage IDs read from the PCODE mailbox.  Ends with the
 * forcewake-domain dump.
 */
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	/* _FW read bypasses forcewake; the raw value is also traced. */
	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	/* RC6 VIDs only exist up to gen7; read via the PCODE mailbox. */
	if (INTEL_GEN(dev_priv) <= 7) {
		mutex_lock(&dev_priv->pcu_lock);
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids);
		mutex_unlock(&dev_priv->pcu_lock);
	}

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		/* RC0 with core power down pending is reported separately */
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	if (INTEL_GEN(dev_priv) <= 7) {
		/* one VID per byte of the mailbox result */
		seq_printf(m, "RC6 voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	return i915_forcewake_domains(m, NULL);
}
1559
1560static int i915_drpc_info(struct seq_file *m, void *unused)
1561{
David Weinehall36cdd012016-08-22 13:59:31 +03001562 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00001563 intel_wakeref_t wakeref;
Chris Wilsond4225a52019-01-14 14:21:23 +00001564 int err = -ENODEV;
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001565
Chris Wilsond4225a52019-01-14 14:21:23 +00001566 with_intel_runtime_pm(dev_priv, wakeref) {
1567 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1568 err = vlv_drpc_info(m);
1569 else if (INTEL_GEN(dev_priv) >= 6)
1570 err = gen6_drpc_info(m);
1571 else
1572 err = ironlake_drpc_info(m);
1573 }
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001574
1575 return err;
Ben Widawsky4d855292011-12-12 19:34:16 -08001576}
1577
Daniel Vetter9a851782015-06-18 10:30:22 +02001578static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1579{
David Weinehall36cdd012016-08-22 13:59:31 +03001580 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Daniel Vetter9a851782015-06-18 10:30:22 +02001581
1582 seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1583 dev_priv->fb_tracking.busy_bits);
1584
1585 seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1586 dev_priv->fb_tracking.flip_bits);
1587
1588 return 0;
1589}
1590
Jesse Barnesb5e50c32010-02-05 12:42:41 -08001591static int i915_fbc_status(struct seq_file *m, void *unused)
1592{
David Weinehall36cdd012016-08-22 13:59:31 +03001593 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilson31388722017-12-20 20:58:48 +00001594 struct intel_fbc *fbc = &dev_priv->fbc;
Chris Wilsona0371212019-01-14 14:21:14 +00001595 intel_wakeref_t wakeref;
Jesse Barnesb5e50c32010-02-05 12:42:41 -08001596
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00001597 if (!HAS_FBC(dev_priv))
1598 return -ENODEV;
Jesse Barnesb5e50c32010-02-05 12:42:41 -08001599
Chris Wilsona0371212019-01-14 14:21:14 +00001600 wakeref = intel_runtime_pm_get(dev_priv);
Chris Wilson31388722017-12-20 20:58:48 +00001601 mutex_lock(&fbc->lock);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001602
Paulo Zanoni0e631ad2015-10-14 17:45:36 -03001603 if (intel_fbc_is_active(dev_priv))
Damien Lespiau267f0c92013-06-24 22:59:48 +01001604 seq_puts(m, "FBC enabled\n");
Paulo Zanoni2e8144a2015-06-12 14:36:20 -03001605 else
Chris Wilson31388722017-12-20 20:58:48 +00001606 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1607
Ville Syrjälä3fd5d1e2017-06-06 15:43:18 +03001608 if (intel_fbc_is_active(dev_priv)) {
1609 u32 mask;
1610
1611 if (INTEL_GEN(dev_priv) >= 8)
1612 mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1613 else if (INTEL_GEN(dev_priv) >= 7)
1614 mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1615 else if (INTEL_GEN(dev_priv) >= 5)
1616 mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1617 else if (IS_G4X(dev_priv))
1618 mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1619 else
1620 mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1621 FBC_STAT_COMPRESSED);
1622
1623 seq_printf(m, "Compressing: %s\n", yesno(mask));
Paulo Zanoni0fc6a9d2016-10-21 13:55:46 -02001624 }
Paulo Zanoni31b9df12015-06-12 14:36:18 -03001625
Chris Wilson31388722017-12-20 20:58:48 +00001626 mutex_unlock(&fbc->lock);
Chris Wilsona0371212019-01-14 14:21:14 +00001627 intel_runtime_pm_put(dev_priv, wakeref);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001628
Jesse Barnesb5e50c32010-02-05 12:42:41 -08001629 return 0;
1630}
1631
Ville Syrjälä4127dc42017-06-06 15:44:12 +03001632static int i915_fbc_false_color_get(void *data, u64 *val)
Rodrigo Vivida46f932014-08-01 02:04:45 -07001633{
David Weinehall36cdd012016-08-22 13:59:31 +03001634 struct drm_i915_private *dev_priv = data;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001635
David Weinehall36cdd012016-08-22 13:59:31 +03001636 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
Rodrigo Vivida46f932014-08-01 02:04:45 -07001637 return -ENODEV;
1638
Rodrigo Vivida46f932014-08-01 02:04:45 -07001639 *val = dev_priv->fbc.false_color;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001640
1641 return 0;
1642}
1643
Ville Syrjälä4127dc42017-06-06 15:44:12 +03001644static int i915_fbc_false_color_set(void *data, u64 val)
Rodrigo Vivida46f932014-08-01 02:04:45 -07001645{
David Weinehall36cdd012016-08-22 13:59:31 +03001646 struct drm_i915_private *dev_priv = data;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001647 u32 reg;
1648
David Weinehall36cdd012016-08-22 13:59:31 +03001649 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
Rodrigo Vivida46f932014-08-01 02:04:45 -07001650 return -ENODEV;
1651
Paulo Zanoni25ad93f2015-07-02 19:25:10 -03001652 mutex_lock(&dev_priv->fbc.lock);
Rodrigo Vivida46f932014-08-01 02:04:45 -07001653
1654 reg = I915_READ(ILK_DPFC_CONTROL);
1655 dev_priv->fbc.false_color = val;
1656
1657 I915_WRITE(ILK_DPFC_CONTROL, val ?
1658 (reg | FBC_CTL_FALSE_COLOR) :
1659 (reg & ~FBC_CTL_FALSE_COLOR));
1660
Paulo Zanoni25ad93f2015-07-02 19:25:10 -03001661 mutex_unlock(&dev_priv->fbc.lock);
Rodrigo Vivida46f932014-08-01 02:04:45 -07001662 return 0;
1663}
1664
/*
 * file_operations tying the false-colour get/set callbacks to a simple
 * debugfs integer file formatted as "%llu\n".
 * NOTE(review): DEFINE_DEBUGFS_ATTRIBUTE would guard against file-removal
 * races, but requires registration via debugfs_create_file_unsafe() —
 * confirm call sites before switching.
 */
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");
1668
Paulo Zanoni92d44622013-05-31 16:33:24 -03001669static int i915_ips_status(struct seq_file *m, void *unused)
1670{
David Weinehall36cdd012016-08-22 13:59:31 +03001671 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00001672 intel_wakeref_t wakeref;
Paulo Zanoni92d44622013-05-31 16:33:24 -03001673
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00001674 if (!HAS_IPS(dev_priv))
1675 return -ENODEV;
Paulo Zanoni92d44622013-05-31 16:33:24 -03001676
Chris Wilsona0371212019-01-14 14:21:14 +00001677 wakeref = intel_runtime_pm_get(dev_priv);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001678
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001679 seq_printf(m, "Enabled by kernel parameter: %s\n",
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00001680 yesno(i915_modparams.enable_ips));
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001681
David Weinehall36cdd012016-08-22 13:59:31 +03001682 if (INTEL_GEN(dev_priv) >= 8) {
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001683 seq_puts(m, "Currently: unknown\n");
1684 } else {
1685 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1686 seq_puts(m, "Currently: enabled\n");
1687 else
1688 seq_puts(m, "Currently: disabled\n");
1689 }
Paulo Zanoni92d44622013-05-31 16:33:24 -03001690
Chris Wilsona0371212019-01-14 14:21:14 +00001691 intel_runtime_pm_put(dev_priv, wakeref);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001692
Paulo Zanoni92d44622013-05-31 16:33:24 -03001693 return 0;
1694}
1695
/*
 * debugfs: report whether display self-refresh (SR) is enabled.
 *
 * The enable bit lives in a different register on nearly every platform
 * generation, so this is a first-match probe chain; gen9+ has no single
 * global SR bit at all.  Display power is held so the registers decode.
 */
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	bool sr_enabled = false;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
1724
Jesse Barnes7648fa92010-05-20 14:28:11 -07001725static int i915_emon_status(struct seq_file *m, void *unused)
1726{
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001727 struct drm_i915_private *i915 = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00001728 intel_wakeref_t wakeref;
Chris Wilsonde227ef2010-07-03 07:58:38 +01001729
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001730 if (!IS_GEN(i915, 5))
Chris Wilson582be6b2012-04-30 19:35:02 +01001731 return -ENODEV;
1732
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001733 with_intel_runtime_pm(i915, wakeref) {
1734 unsigned long temp, chipset, gfx;
Jesse Barnes7648fa92010-05-20 14:28:11 -07001735
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001736 temp = i915_mch_val(i915);
1737 chipset = i915_chipset_val(i915);
1738 gfx = i915_gfx_val(i915);
Chris Wilsona0371212019-01-14 14:21:14 +00001739
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001740 seq_printf(m, "GMCH temp: %ld\n", temp);
1741 seq_printf(m, "Chipset power: %ld\n", chipset);
1742 seq_printf(m, "GFX power: %ld\n", gfx);
1743 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1744 }
Jesse Barnes7648fa92010-05-20 14:28:11 -07001745
1746 return 0;
1747}
1748
/*
 * debugfs: print the GPU-to-CPU/ring frequency mapping table.
 *
 * For every GPU frequency step between the RPS min and max, ask PCODE
 * (GEN6_PCODE_READ_MIN_FREQ_TABLE) for the matching effective CPU and
 * ring frequencies.  Only meaningful on parts with an LLC.
 */
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	intel_wakeref_t wakeref;
	int gpu_freq, ia_freq;
	int ret;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(dev_priv);

	/* pcu_lock serialises the PCODE mailbox; bail out if interrupted. */
	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		goto out;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		ia_freq = gpu_freq;
		/* in: GPU freq step; out: CPU freq (byte 0), ring freq (byte 1) */
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ||
						      INTEL_GEN(dev_priv) >= 10 ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	intel_runtime_pm_put(dev_priv, wakeref);
	return ret;
}
1797
Chris Wilson44834a62010-08-19 16:09:23 +01001798static int i915_opregion(struct seq_file *m, void *unused)
1799{
David Weinehall36cdd012016-08-22 13:59:31 +03001800 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1801 struct drm_device *dev = &dev_priv->drm;
Chris Wilson44834a62010-08-19 16:09:23 +01001802 struct intel_opregion *opregion = &dev_priv->opregion;
1803 int ret;
1804
1805 ret = mutex_lock_interruptible(&dev->struct_mutex);
1806 if (ret)
Daniel Vetter0d38f002012-04-21 22:49:10 +02001807 goto out;
Chris Wilson44834a62010-08-19 16:09:23 +01001808
Jani Nikula2455a8e2015-12-14 12:50:53 +02001809 if (opregion->header)
1810 seq_write(m, opregion->header, OPREGION_SIZE);
Chris Wilson44834a62010-08-19 16:09:23 +01001811
1812 mutex_unlock(&dev->struct_mutex);
1813
Daniel Vetter0d38f002012-04-21 22:49:10 +02001814out:
Chris Wilson44834a62010-08-19 16:09:23 +01001815 return 0;
1816}
1817
Jani Nikulaada8f952015-12-15 13:17:12 +02001818static int i915_vbt(struct seq_file *m, void *unused)
1819{
David Weinehall36cdd012016-08-22 13:59:31 +03001820 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
Jani Nikulaada8f952015-12-15 13:17:12 +02001821
1822 if (opregion->vbt)
1823 seq_write(m, opregion->vbt, opregion->vbt_size);
1824
1825 return 0;
1826}
1827
/*
 * debugfs: describe every framebuffer known to the device — the fbdev
 * console framebuffer first (when fbdev emulation is built in), then all
 * user framebuffers, skipping the fbdev one to avoid listing it twice.
 *
 * Holds struct_mutex for describe_obj() and fb_lock while walking the
 * framebuffer list (taken in that order).
 */
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
		seq_putc(m, '\n');
	}
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
		/* already printed above as the fbcon framebuffer */
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, intel_fb_obj(&fb->base));
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1877
Chris Wilson7e37f882016-08-02 22:50:21 +01001878static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001879{
Chris Wilsonef5032a2018-03-07 13:42:24 +00001880 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1881 ring->space, ring->head, ring->tail, ring->emit);
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001882}
1883
/*
 * i915_context_status - debugfs dump of all GEM contexts.
 *
 * For each context on dev_priv->contexts.list prints: the HW id and pin
 * count (only if the context currently holds a hw_id), the owning task
 * (or "(deleted)"/"(kernel)"), the remap_slice flag, and then for every
 * engine the context has used, its state object and ringbuffer summary.
 *
 * The context list and per-context state are walked under struct_mutex.
 */
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_gem_context *ctx;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		struct intel_context *ce;

		seq_puts(m, "HW context ");
		/* hw_id is only valid while on the hw_id list (pinned) */
		if (!list_empty(&ctx->hw_id_link))
			seq_printf(m, "%x [pin %u]", ctx->hw_id,
				   atomic_read(&ctx->hw_id_pin_count));
		if (ctx->pid) {
			struct task_struct *task;

			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			/* file_priv holds an errno once the owner closed it */
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		/* 'R' = slices need remapping, 'r' = no remap pending */
		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		/* one line per engine this context has run on */
		list_for_each_entry(ce, &ctx->active_engines, active_link) {
			seq_printf(m, "%s: ", ce->engine->name);
			if (ce->state)
				describe_obj(m, ce->state->obj);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1936
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001937static const char *swizzle_string(unsigned swizzle)
1938{
Damien Lespiauaee56cf2013-06-24 22:59:49 +01001939 switch (swizzle) {
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001940 case I915_BIT_6_SWIZZLE_NONE:
1941 return "none";
1942 case I915_BIT_6_SWIZZLE_9:
1943 return "bit9";
1944 case I915_BIT_6_SWIZZLE_9_10:
1945 return "bit9/bit10";
1946 case I915_BIT_6_SWIZZLE_9_11:
1947 return "bit9/bit11";
1948 case I915_BIT_6_SWIZZLE_9_10_11:
1949 return "bit9/bit10/bit11";
1950 case I915_BIT_6_SWIZZLE_9_17:
1951 return "bit9/bit17";
1952 case I915_BIT_6_SWIZZLE_9_10_17:
1953 return "bit9/bit10/bit17";
1954 case I915_BIT_6_SWIZZLE_UNKNOWN:
Masanari Iida8a168ca2012-12-29 02:00:09 +09001955 return "unknown";
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001956 }
1957
1958 return "bug";
1959}
1960
/*
 * i915_swizzle_info - report the detected bit6 swizzle modes for X/Y
 * tiling together with the hardware registers they were derived from.
 *
 * A runtime-pm wakeref is held across the whole dump since the register
 * reads require the device awake. Gen3/4 expose DCC/DRB registers;
 * gen6+ expose the MAD_DIMM/TILECTL/ARB set instead.
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		/* gen8 moved the arbiter mode into GAMTARBMODE */
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	/* NOTE(review): quirk presumably set at init when L-shaped memory
	 * is detected — confirm against the quirk's definition site. */
	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
2008
Chris Wilson7466c292016-08-15 09:49:33 +01002009static const char *rps_power_to_str(unsigned int power)
2010{
2011 static const char * const strings[] = {
2012 [LOW_POWER] = "low power",
2013 [BETWEEN] = "mixed",
2014 [HIGH_POWER] = "high power",
2015 };
2016
2017 if (power >= ARRAY_SIZE(strings) || !strings[power])
2018 return "unknown";
2019
2020 return strings[power];
2021}
2022
/*
 * i915_rps_boost_info - debugfs dump of RPS (GPU frequency scaling)
 * state: enablement, requested vs actual frequency, soft/hard limits,
 * boost counters and — when active — the up/down autotuning averages.
 *
 * The actual frequency is only read from hardware if the device already
 * holds a runtime-pm wakeref; otherwise the cached cur_freq is reported.
 */
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 act_freq = rps->cur_freq;	/* fallback if device is asleep */
	intel_wakeref_t wakeref;

	with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			/* VLV/CHV report frequency via the punit sideband */
			mutex_lock(&dev_priv->pcu_lock);
			act_freq = vlv_punit_read(dev_priv,
						  PUNIT_REG_GPU_FREQ_STS);
			act_freq = (act_freq >> 8) & 0xff;
			mutex_unlock(&dev_priv->pcu_lock);
		} else {
			act_freq = intel_get_cagf(dev_priv,
						  I915_READ(GEN6_RPSTAT1));
		}
	}

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(dev_priv, rps->cur_freq),
		   intel_gpu_freq(dev_priv, act_freq));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, rps->min_freq),
		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, rps->idle_freq),
		   intel_gpu_freq(dev_priv, rps->efficient_freq),
		   intel_gpu_freq(dev_priv, rps->boost_freq));

	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));

	if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		/* _FW reads bypass the forcewake bookkeeping, so take it
		 * explicitly around the burst of reads. */
		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		/* guard against divide-by-zero when counters are idle */
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
2088
Ben Widawsky63573eb2013-07-04 11:02:07 -07002089static int i915_llc(struct seq_file *m, void *data)
2090{
David Weinehall36cdd012016-08-22 13:59:31 +03002091 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Mika Kuoppala3accaf72016-04-13 17:26:43 +03002092 const bool edram = INTEL_GEN(dev_priv) > 8;
Ben Widawsky63573eb2013-07-04 11:02:07 -07002093
David Weinehall36cdd012016-08-22 13:59:31 +03002094 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
Daniele Ceraolo Spuriof6ac9932019-03-28 10:45:32 -07002095 seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
2096 dev_priv->edram_size_mb);
Ben Widawsky63573eb2013-07-04 11:02:07 -07002097
2098 return 0;
2099}
2100
/*
 * i915_huc_load_status_info - dump HuC firmware descriptor state and the
 * HUC_STATUS2 register.
 *
 * Returns -ENODEV on platforms without HuC. Only the register read needs
 * the device awake, so the runtime-pm wakeref wraps just that statement.
 */
static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct drm_printer p;

	if (!HAS_HUC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->huc.fw, &p);

	with_intel_runtime_pm(dev_priv, wakeref)
		seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));

	return 0;
}
2118
/*
 * i915_guc_load_status_info - dump GuC firmware descriptor state, the
 * decoded GUC_STATUS register and all 16 SOFT_SCRATCH registers.
 *
 * Returns -ENODEV on platforms without GuC. Register reads are done
 * under a temporary runtime-pm wakeref.
 */
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct drm_printer p;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->guc.fw, &p);

	with_intel_runtime_pm(dev_priv, wakeref) {
		u32 tmp = I915_READ(GUC_STATUS);
		u32 i;

		seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
		/* decode the bootrom / uKernel / MIA core fields */
		seq_printf(m, "\tBootrom status = 0x%x\n",
			   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
		seq_printf(m, "\tuKernel status = 0x%x\n",
			   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
		seq_printf(m, "\tMIA Core status = 0x%x\n",
			   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
		seq_puts(m, "\nScratch registers:\n");
		for (i = 0; i < 16; i++) {
			seq_printf(m, "\t%2d: \t0x%x\n",
				   i, I915_READ(SOFT_SCRATCH(i)));
		}
	}

	return 0;
}
2151
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01002152static const char *
2153stringify_guc_log_type(enum guc_log_buffer_type type)
2154{
2155 switch (type) {
2156 case GUC_ISR_LOG_BUFFER:
2157 return "ISR";
2158 case GUC_DPC_LOG_BUFFER:
2159 return "DPC";
2160 case GUC_CRASH_DUMP_LOG_BUFFER:
2161 return "CRASH";
2162 default:
2163 MISSING_CASE(type);
2164 }
2165
2166 return "";
2167}
2168
/*
 * i915_guc_log_info - print GuC log relay statistics.
 *
 * When the relay is not enabled only a notice line is emitted. Otherwise
 * prints the relay-full count and, per log buffer type, the flush and
 * sampled-overflow counters.
 */
static void i915_guc_log_info(struct seq_file *m,
			      struct drm_i915_private *dev_priv)
{
	struct intel_guc_log *log = &dev_priv->guc.log;
	enum guc_log_buffer_type type;

	if (!intel_guc_log_relay_enabled(log)) {
		seq_puts(m, "GuC log relay disabled\n");
		return;
	}

	seq_puts(m, "GuC logging stats:\n");

	seq_printf(m, "\tRelay full count: %u\n",
		   log->relay.full_count);

	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
		seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
			   stringify_guc_log_type(type),
			   log->stats[type].flush,
			   log->stats[type].sampled_overflow);
	}
}
2192
/*
 * i915_guc_client_info - describe one GuC client: scheduling priority,
 * stage index, process-descriptor offset, doorbell assignment, and the
 * submission count per engine plus their total.
 */
static void i915_guc_client_info(struct seq_file *m,
				 struct drm_i915_private *dev_priv,
				 struct intel_guc_client *client)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u64 tot = 0;

	seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
		   client->priority, client->stage_id, client->proc_desc_offset);
	seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
		   client->doorbell_id, client->doorbell_offset);

	for_each_engine(engine, dev_priv, id) {
		u64 submissions = client->submissions[id];
		tot += submissions;
		seq_printf(m, "\tSubmissions: %llu %s\n",
			   submissions, engine->name);
	}
	seq_printf(m, "\tTotal: %llu\n", tot);
}
2214
2215static int i915_guc_info(struct seq_file *m, void *data)
2216{
David Weinehall36cdd012016-08-22 13:59:31 +03002217 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilson334636c2016-11-29 12:10:20 +00002218 const struct intel_guc *guc = &dev_priv->guc;
Dave Gordon8b417c22015-08-12 15:43:44 +01002219
Michał Winiarskidb557992018-03-19 10:53:43 +01002220 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002221 return -ENODEV;
2222
Michał Winiarskidb557992018-03-19 10:53:43 +01002223 i915_guc_log_info(m, dev_priv);
2224
2225 if (!USES_GUC_SUBMISSION(dev_priv))
2226 return 0;
2227
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002228 GEM_BUG_ON(!guc->execbuf_client);
Dave Gordon8b417c22015-08-12 15:43:44 +01002229
Michał Winiarskidb557992018-03-19 10:53:43 +01002230 seq_printf(m, "\nDoorbell map:\n");
Joonas Lahtinenabddffd2017-03-22 10:39:44 -07002231 seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
Michał Winiarskidb557992018-03-19 10:53:43 +01002232 seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
Dave Gordon9636f6d2016-06-13 17:57:28 +01002233
Chris Wilson334636c2016-11-29 12:10:20 +00002234 seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2235 i915_guc_client_info(m, dev_priv, guc->execbuf_client);
Chris Wilsone78c9172018-02-07 21:05:42 +00002236 if (guc->preempt_client) {
2237 seq_printf(m, "\nGuC preempt client @ %p:\n",
2238 guc->preempt_client);
2239 i915_guc_client_info(m, dev_priv, guc->preempt_client);
2240 }
Dave Gordon8b417c22015-08-12 15:43:44 +01002241
2242 /* Add more as required ... */
2243
2244 return 0;
2245}
2246
/*
 * i915_guc_stage_pool - dump the GuC stage descriptor pool.
 *
 * Walks all GUC_MAX_STAGE_DESCRIPTORS entries, skipping inactive ones,
 * and prints each descriptor's ids, priority, doorbell, workqueue and
 * the per-engine execlist context details for the engines used by the
 * execbuf client. Returns -ENODEV without GuC submission.
 */
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;
	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
	struct intel_guc_client *client = guc->execbuf_client;
	intel_engine_mask_t tmp;
	int index;

	if (!USES_GUC_SUBMISSION(dev_priv))
		return -ENODEV;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		/* only active descriptors carry meaningful content */
		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		/* per-engine execlist context within this descriptor */
		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
				&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}
2300
Alex Dai4c7e77f2015-08-12 15:43:40 +01002301static int i915_guc_log_dump(struct seq_file *m, void *data)
2302{
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002303 struct drm_info_node *node = m->private;
2304 struct drm_i915_private *dev_priv = node_to_i915(node);
2305 bool dump_load_err = !!node->info_ent->data;
2306 struct drm_i915_gem_object *obj = NULL;
2307 u32 *log;
2308 int i = 0;
Alex Dai4c7e77f2015-08-12 15:43:40 +01002309
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002310 if (!HAS_GUC(dev_priv))
2311 return -ENODEV;
2312
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002313 if (dump_load_err)
2314 obj = dev_priv->guc.load_err_log;
2315 else if (dev_priv->guc.log.vma)
2316 obj = dev_priv->guc.log.vma->obj;
2317
2318 if (!obj)
Alex Dai4c7e77f2015-08-12 15:43:40 +01002319 return 0;
2320
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002321 log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2322 if (IS_ERR(log)) {
2323 DRM_DEBUG("Failed to pin object\n");
2324 seq_puts(m, "(log data unaccessible)\n");
2325 return PTR_ERR(log);
Alex Dai4c7e77f2015-08-12 15:43:40 +01002326 }
2327
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002328 for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2329 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2330 *(log + i), *(log + i + 1),
2331 *(log + i + 2), *(log + i + 3));
2332
Alex Dai4c7e77f2015-08-12 15:43:40 +01002333 seq_putc(m, '\n');
2334
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002335 i915_gem_object_unpin_map(obj);
2336
Alex Dai4c7e77f2015-08-12 15:43:40 +01002337 return 0;
2338}
2339
Michał Winiarski4977a282018-03-19 10:53:40 +01002340static int i915_guc_log_level_get(void *data, u64 *val)
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302341{
Chris Wilsonbcc36d82017-04-07 20:42:20 +01002342 struct drm_i915_private *dev_priv = data;
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302343
Michał Winiarski86aa8242018-03-08 16:46:53 +01002344 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002345 return -ENODEV;
2346
Piotr Piórkowski50935ac2018-06-04 16:19:41 +02002347 *val = intel_guc_log_get_level(&dev_priv->guc.log);
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302348
2349 return 0;
2350}
2351
Michał Winiarski4977a282018-03-19 10:53:40 +01002352static int i915_guc_log_level_set(void *data, u64 val)
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302353{
Chris Wilsonbcc36d82017-04-07 20:42:20 +01002354 struct drm_i915_private *dev_priv = data;
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302355
Michał Winiarski86aa8242018-03-08 16:46:53 +01002356 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002357 return -ENODEV;
2358
Piotr Piórkowski50935ac2018-06-04 16:19:41 +02002359 return intel_guc_log_set_level(&dev_priv->guc.log, val);
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302360}
2361
/* Expose the get/set pair above as a simple debugfs attribute. */
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
			i915_guc_log_level_get, i915_guc_log_level_set,
			"%lld\n");
2365
Michał Winiarski4977a282018-03-19 10:53:40 +01002366static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2367{
2368 struct drm_i915_private *dev_priv = inode->i_private;
2369
2370 if (!USES_GUC(dev_priv))
2371 return -ENODEV;
2372
2373 file->private_data = &dev_priv->guc.log;
2374
2375 return intel_guc_log_relay_open(&dev_priv->guc.log);
2376}
2377
/*
 * debugfs write: the user data itself is ignored — any write simply
 * forces a flush of the GuC log relay. Always reports the full @cnt
 * as consumed so userspace writes never short-count.
 */
static ssize_t
i915_guc_log_relay_write(struct file *filp,
			 const char __user *ubuf,
			 size_t cnt,
			 loff_t *ppos)
{
	struct intel_guc_log *log = filp->private_data;

	intel_guc_log_relay_flush(log);

	return cnt;
}
2390
/* debugfs release: close the relay opened by i915_guc_log_relay_open(). */
static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	intel_guc_log_relay_close(&dev_priv->guc.log);

	return 0;
}
2399
/*
 * GuC log relay debugfs file: open attaches the log and opens the relay,
 * write forces a flush, release closes the relay again.
 */
static const struct file_operations i915_guc_log_relay_fops = {
	.owner = THIS_MODULE,
	.open = i915_guc_log_relay_open,
	.write = i915_guc_log_relay_write,
	.release = i915_guc_log_relay_release,
};
2406
/*
 * i915_psr_sink_status_show - query the eDP sink's PSR state over DPCD.
 *
 * Reads DP_PSR_STATUS from the sink via AUX and decodes the state field
 * into a human readable string. Requires PSR-capable hardware and a
 * connected connector; returns -ENODEV otherwise.
 */
static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
	u8 val;
	static const char * const sink_status[] = {
		"inactive",
		"transition to active, capture and display",
		"active, display from RFB",
		"active, capture and display on sink device timings",
		"transition to inactive, capture and display, timing re-sync",
		"reserved",
		"reserved",
		"sink internal error",
	};
	struct drm_connector *connector = m->private;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	int ret;

	if (!CAN_PSR(dev_priv)) {
		seq_puts(m, "PSR Unsupported\n");
		return -ENODEV;
	}

	if (connector->status != connector_status_connected)
		return -ENODEV;

	/* single-byte DPCD read: returns 1 on success */
	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);

	if (ret == 1) {
		const char *str = "unknown";

		val &= DP_PSR_SINK_STATE_MASK;
		if (val < ARRAY_SIZE(sink_status))
			str = sink_status[val];
		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
	} else {
		/*
		 * NOTE(review): a zero-byte transfer propagates ret == 0,
		 * i.e. "success" with no output — consider mapping that
		 * case to -EIO. Verify drm_dp_dpcd_readb() semantics.
		 */
		return ret;
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2450
/*
 * psr_source_status - decode and print the source-side (hardware) PSR
 * state machine status.
 *
 * PSR2 and PSR1 use different status registers (EDP_PSR2_STATUS vs
 * EDP_PSR_STATUS) with different state encodings, hence the two
 * live_status tables. Out-of-range state values print as "unknown",
 * always alongside the raw register value.
 */
static void
psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
{
	u32 val, status_val;
	const char *status = "unknown";

	if (dev_priv->psr.psr2_enabled) {
		static const char * const live_status[] = {
			"IDLE",
			"CAPTURE",
			"CAPTURE_FS",
			"SLEEP",
			"BUFON_FW",
			"ML_UP",
			"SU_STANDBY",
			"FAST_SLEEP",
			"DEEP_SLEEP",
			"BUF_ON",
			"TG_ON"
		};
		val = I915_READ(EDP_PSR2_STATUS);
		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
			      EDP_PSR2_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	} else {
		static const char * const live_status[] = {
			"IDLE",
			"SRDONACK",
			"SRDENT",
			"BUFOFF",
			"BUFON",
			"AUXACK",
			"SRDOFFACK",
			"SRDENT_ON",
		};
		val = I915_READ(EDP_PSR_STATUS);
		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
			      EDP_PSR_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	}

	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
}
2496
/*
 * debugfs entry: dump eDP PSR (Panel Self Refresh) state.
 *
 * Prints sink capability, the negotiated PSR mode (PSR1 vs PSR2), the
 * source-side control/status registers and, for PSR2, the selective-update
 * block counts per frame.  Returns 0, or -ENODEV when the platform has no
 * PSR support.
 */
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_psr *psr = &dev_priv->psr;
	intel_wakeref_t wakeref;
	const char *status;
	bool enabled;
	u32 val;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	/* Sink (panel) capability is reported even when PSR is inactive. */
	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
	if (psr->dp)
		seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
	seq_puts(m, "\n");

	/* Nothing further to report without a PSR-capable sink. */
	if (!psr->sink_support)
		return 0;

	/* Hold a wakeref so the MMIO reads below hit powered-up hardware. */
	wakeref = intel_runtime_pm_get(dev_priv);
	mutex_lock(&psr->lock);

	if (psr->enabled)
		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
	else
		status = "disabled";
	seq_printf(m, "PSR mode: %s\n", status);

	if (!psr->enabled)
		goto unlock;

	/* PSR1 and PSR2 are driven by different control registers. */
	if (psr->psr2_enabled) {
		val = I915_READ(EDP_PSR2_CTL);
		enabled = val & EDP_PSR2_ENABLE;
	} else {
		val = I915_READ(EDP_PSR_CTL);
		enabled = val & EDP_PSR_ENABLE;
	}
	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
		   enableddisabled(enabled), val);
	psr_source_status(dev_priv, m);
	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
		   psr->busy_frontbuffer_bits);

	/*
	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
		seq_printf(m, "Performance counter: %u\n", val);
	}

	/* Entry/exit timestamps are only tracked in IRQ debug mode. */
	if (psr->debug & I915_PSR_DEBUG_IRQ) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   psr->last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
	}

	if (psr->psr2_enabled) {
		u32 su_frames_val[3];
		int frame;

		/*
		 * Reading all 3 registers before hand to minimize crossing a
		 * frame boundary between register reads
		 */
		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
			su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));

		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");

		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
			u32 su_blocks;

			/* Each status register packs the counts for 3 frames. */
			su_blocks = su_frames_val[frame / 3] &
				    PSR2_SU_STATUS_MASK(frame);
			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
			seq_printf(m, "%d\t%d\n", frame, su_blocks);
		}
	}

unlock:
	mutex_unlock(&psr->lock);
	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
2585
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002586static int
2587i915_edp_psr_debug_set(void *data, u64 val)
2588{
2589 struct drm_i915_private *dev_priv = data;
Chris Wilsona0371212019-01-14 14:21:14 +00002590 intel_wakeref_t wakeref;
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002591 int ret;
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002592
2593 if (!CAN_PSR(dev_priv))
2594 return -ENODEV;
2595
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002596 DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002597
Chris Wilsona0371212019-01-14 14:21:14 +00002598 wakeref = intel_runtime_pm_get(dev_priv);
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002599
José Roberto de Souza23ec9f52019-02-06 13:18:45 -08002600 ret = intel_psr_debug_set(dev_priv, val);
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002601
Chris Wilsona0371212019-01-14 14:21:14 +00002602 intel_runtime_pm_put(dev_priv, wakeref);
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002603
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002604 return ret;
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002605}
2606
2607static int
2608i915_edp_psr_debug_get(void *data, u64 *val)
2609{
2610 struct drm_i915_private *dev_priv = data;
2611
2612 if (!CAN_PSR(dev_priv))
2613 return -ENODEV;
2614
2615 *val = READ_ONCE(dev_priv->psr.debug);
2616 return 0;
2617}
2618
/* File operations for the PSR debug knob: value formatted as decimal u64. */
DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");
2622
/*
 * debugfs entry: report the GPU energy counter converted to microjoules.
 *
 * Reads the energy-status unit exponent from the RAPL power-unit MSR
 * (bits 12:8 -- per Intel SDM each counter tick is 1/2^units joule;
 * NOTE(review): confirm field semantics for the platform at hand) and
 * scales the MCH_SECP_NRG_STTS counter accordingly.
 */
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	unsigned long long power;
	intel_wakeref_t wakeref;
	u32 units;

	/* This interface is only supported on gen6+. */
	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	/* Bail if the CPU has no readable RAPL power-unit MSR. */
	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
		return -ENODEV;

	units = (power & 0x1f00) >> 8;
	/* 'power' is reused for the raw energy counter from the GPU. */
	with_intel_runtime_pm(dev_priv, wakeref)
		power = I915_READ(MCH_SECP_NRG_STTS);

	power = (1000000 * power) >> units; /* convert to uJ */
	seq_printf(m, "%llu", power);

	return 0;
}
2645
/*
 * debugfs entry: summarize runtime power management state -- whether the
 * device currently holds a runtime-PM wakeref, GT idleness, IRQ state,
 * the runtime-PM usage count (when CONFIG_PM is set) and the PCI power
 * state.  Always returns 0.
 */
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/* Only a notice; the remaining fields are still printed. */
	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "Runtime power status: %s\n",
		   enableddisabled(!dev_priv->power_domains.wakeref));

	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	/* With wakeref tracking built in, dump the outstanding wakerefs. */
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
		struct drm_printer p = drm_seq_file_printer(m);

		print_intel_runtime_pm_wakeref(dev_priv, &p);
	}

	return 0;
}
2678
Imre Deak1da51582013-11-25 17:15:35 +02002679static int i915_power_domain_info(struct seq_file *m, void *unused)
2680{
David Weinehall36cdd012016-08-22 13:59:31 +03002681 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Imre Deak1da51582013-11-25 17:15:35 +02002682 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2683 int i;
2684
2685 mutex_lock(&power_domains->lock);
2686
2687 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2688 for (i = 0; i < power_domains->power_well_count; i++) {
2689 struct i915_power_well *power_well;
2690 enum intel_display_power_domain power_domain;
2691
2692 power_well = &power_domains->power_wells[i];
Imre Deakf28ec6f2018-08-06 12:58:37 +03002693 seq_printf(m, "%-25s %d\n", power_well->desc->name,
Imre Deak1da51582013-11-25 17:15:35 +02002694 power_well->count);
2695
Imre Deakf28ec6f2018-08-06 12:58:37 +03002696 for_each_power_domain(power_domain, power_well->desc->domains)
Imre Deak1da51582013-11-25 17:15:35 +02002697 seq_printf(m, " %-23s %d\n",
Daniel Stone9895ad02015-11-20 15:55:33 +00002698 intel_display_power_domain_str(power_domain),
Imre Deak1da51582013-11-25 17:15:35 +02002699 power_domains->domain_use_count[power_domain]);
Imre Deak1da51582013-11-25 17:15:35 +02002700 }
2701
2702 mutex_unlock(&power_domains->lock);
2703
2704 return 0;
2705}
2706
/*
 * debugfs entry: DMC/CSR firmware state -- whether the firmware payload
 * is loaded, its path and version, DC-state transition counters and the
 * CSR program/ssp/htp registers.  Returns -ENODEV without CSR support.
 */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv))
		return -ENODEV;

	csr = &dev_priv->csr;

	/* Keep the device awake for the register reads below. */
	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	/* Version and DC counters are meaningless without a payload. */
	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	/* The counter register offsets below are only known up to gen11. */
	if (WARN_ON(INTEL_GEN(dev_priv) > 11))
		goto out;

	seq_printf(m, "DC3 -> DC5 count: %d\n",
		   I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
						    SKL_CSR_DC3_DC5_COUNT));
	if (!IS_GEN9_LP(dev_priv))
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
2748
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002749static void intel_seq_print_mode(struct seq_file *m, int tabs,
2750 struct drm_display_mode *mode)
2751{
2752 int i;
2753
2754 for (i = 0; i < tabs; i++)
2755 seq_putc(m, '\t');
2756
Shayenne Moura4fb6bb82018-12-20 10:27:57 -02002757 seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002758}
2759
2760static void intel_encoder_info(struct seq_file *m,
2761 struct intel_crtc *intel_crtc,
2762 struct intel_encoder *intel_encoder)
2763{
David Weinehall36cdd012016-08-22 13:59:31 +03002764 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2765 struct drm_device *dev = &dev_priv->drm;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002766 struct drm_crtc *crtc = &intel_crtc->base;
2767 struct intel_connector *intel_connector;
2768 struct drm_encoder *encoder;
2769
2770 encoder = &intel_encoder->base;
2771 seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
Jani Nikula8e329a032014-06-03 14:56:21 +03002772 encoder->base.id, encoder->name);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002773 for_each_connector_on_encoder(dev, encoder, intel_connector) {
2774 struct drm_connector *connector = &intel_connector->base;
2775 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2776 connector->base.id,
Jani Nikulac23cc412014-06-03 14:56:17 +03002777 connector->name,
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002778 drm_get_connector_status_name(connector->status));
2779 if (connector->status == connector_status_connected) {
2780 struct drm_display_mode *mode = &crtc->mode;
2781 seq_printf(m, ", mode:\n");
2782 intel_seq_print_mode(m, 2, mode);
2783 } else {
2784 seq_putc(m, '\n');
2785 }
2786 }
2787}
2788
2789static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2790{
David Weinehall36cdd012016-08-22 13:59:31 +03002791 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2792 struct drm_device *dev = &dev_priv->drm;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002793 struct drm_crtc *crtc = &intel_crtc->base;
2794 struct intel_encoder *intel_encoder;
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02002795 struct drm_plane_state *plane_state = crtc->primary->state;
2796 struct drm_framebuffer *fb = plane_state->fb;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002797
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02002798 if (fb)
Matt Roper5aa8a932014-06-16 10:12:55 -07002799 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02002800 fb->base.id, plane_state->src_x >> 16,
2801 plane_state->src_y >> 16, fb->width, fb->height);
Matt Roper5aa8a932014-06-16 10:12:55 -07002802 else
2803 seq_puts(m, "\tprimary plane disabled\n");
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002804 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2805 intel_encoder_info(m, intel_crtc, intel_encoder);
2806}
2807
2808static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2809{
2810 struct drm_display_mode *mode = panel->fixed_mode;
2811
2812 seq_printf(m, "\tfixed mode:\n");
2813 intel_seq_print_mode(m, 2, mode);
2814}
2815
2816static void intel_dp_info(struct seq_file *m,
2817 struct intel_connector *intel_connector)
2818{
2819 struct intel_encoder *intel_encoder = intel_connector->encoder;
2820 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2821
2822 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
Jani Nikula742f4912015-09-03 11:16:09 +03002823 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02002824 if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002825 intel_panel_info(m, &intel_connector->panel);
Mika Kahola80209e52016-09-09 14:10:57 +03002826
2827 drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2828 &intel_dp->aux);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002829}
2830
Libin Yang9a148a92016-11-28 20:07:05 +08002831static void intel_dp_mst_info(struct seq_file *m,
2832 struct intel_connector *intel_connector)
2833{
2834 struct intel_encoder *intel_encoder = intel_connector->encoder;
2835 struct intel_dp_mst_encoder *intel_mst =
2836 enc_to_mst(&intel_encoder->base);
2837 struct intel_digital_port *intel_dig_port = intel_mst->primary;
2838 struct intel_dp *intel_dp = &intel_dig_port->dp;
2839 bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2840 intel_connector->port);
2841
2842 seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2843}
2844
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002845static void intel_hdmi_info(struct seq_file *m,
2846 struct intel_connector *intel_connector)
2847{
2848 struct intel_encoder *intel_encoder = intel_connector->encoder;
2849 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2850
Jani Nikula742f4912015-09-03 11:16:09 +03002851 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002852}
2853
/* Print LVDS-specific connector details: just the fixed panel mode. */
static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
2859
2860static void intel_connector_info(struct seq_file *m,
2861 struct drm_connector *connector)
2862{
2863 struct intel_connector *intel_connector = to_intel_connector(connector);
2864 struct intel_encoder *intel_encoder = intel_connector->encoder;
Jesse Barnesf103fc72014-02-20 12:39:57 -08002865 struct drm_display_mode *mode;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002866
2867 seq_printf(m, "connector %d: type %s, status: %s\n",
Jani Nikulac23cc412014-06-03 14:56:17 +03002868 connector->base.id, connector->name,
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002869 drm_get_connector_status_name(connector->status));
José Roberto de Souza3e037f92018-10-30 14:57:46 -07002870
2871 if (connector->status == connector_status_disconnected)
2872 return;
2873
2874 seq_printf(m, "\tname: %s\n", connector->display_info.name);
2875 seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2876 connector->display_info.width_mm,
2877 connector->display_info.height_mm);
2878 seq_printf(m, "\tsubpixel order: %s\n",
2879 drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2880 seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
Maarten Lankhorstee648a72016-06-21 12:00:38 +02002881
Maarten Lankhorst77d1f612017-06-26 10:33:49 +02002882 if (!intel_encoder)
Maarten Lankhorstee648a72016-06-21 12:00:38 +02002883 return;
2884
2885 switch (connector->connector_type) {
2886 case DRM_MODE_CONNECTOR_DisplayPort:
2887 case DRM_MODE_CONNECTOR_eDP:
Libin Yang9a148a92016-11-28 20:07:05 +08002888 if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
2889 intel_dp_mst_info(m, intel_connector);
2890 else
2891 intel_dp_info(m, intel_connector);
Maarten Lankhorstee648a72016-06-21 12:00:38 +02002892 break;
2893 case DRM_MODE_CONNECTOR_LVDS:
2894 if (intel_encoder->type == INTEL_OUTPUT_LVDS)
Dave Airlie36cd7442014-05-02 13:44:18 +10002895 intel_lvds_info(m, intel_connector);
Maarten Lankhorstee648a72016-06-21 12:00:38 +02002896 break;
2897 case DRM_MODE_CONNECTOR_HDMIA:
2898 if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
Ville Syrjälä7e732ca2017-10-27 22:31:24 +03002899 intel_encoder->type == INTEL_OUTPUT_DDI)
Maarten Lankhorstee648a72016-06-21 12:00:38 +02002900 intel_hdmi_info(m, intel_connector);
2901 break;
2902 default:
2903 break;
Dave Airlie36cd7442014-05-02 13:44:18 +10002904 }
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002905
Jesse Barnesf103fc72014-02-20 12:39:57 -08002906 seq_printf(m, "\tmodes:\n");
2907 list_for_each_entry(mode, &connector->modes, head)
2908 intel_seq_print_mode(m, 2, mode);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002909}
2910
Robert Fekete3abc4e02015-10-27 16:58:32 +01002911static const char *plane_type(enum drm_plane_type type)
2912{
2913 switch (type) {
2914 case DRM_PLANE_TYPE_OVERLAY:
2915 return "OVL";
2916 case DRM_PLANE_TYPE_PRIMARY:
2917 return "PRI";
2918 case DRM_PLANE_TYPE_CURSOR:
2919 return "CUR";
2920 /*
2921 * Deliberately omitting default: to generate compiler warnings
2922 * when a new drm_plane_type gets added.
2923 */
2924 }
2925
2926 return "unknown";
2927}
2928
/*
 * Format @rotation into @buf: every DRM_MODE_ROTATE_/REFLECT_ bit that is
 * set is spelled out, followed by the raw value in hex.
 */
static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
{
	/*
	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused
	 */
	snprintf(buf, bufsize,
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
		 rotation);
}
2945
/*
 * Print one line per plane on @intel_crtc: CRTC position/size, the 16.16
 * fixed-point source coordinates, pixel format and rotation flags.
 */
static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_plane *intel_plane;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane_state *state;
		struct drm_plane *plane = &intel_plane->base;
		struct drm_format_name_buf format_name;
		char rot_str[48];

		if (!plane->state) {
			seq_puts(m, "plane->state is NULL!\n");
			continue;
		}

		state = plane->state;

		if (state->fb) {
			drm_get_format_name(state->fb->format->format,
					    &format_name);
		} else {
			sprintf(format_name.str, "N/A");
		}

		plane_rotation(rot_str, sizeof(rot_str), state->rotation);

		/*
		 * src_* are 16.16 fixed point; the fractional part is
		 * printed via (frac * 15625) >> 10 == frac * 10^6 / 2^16.
		 */
		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
			   plane->base.id,
			   plane_type(intel_plane->base.type),
			   state->crtc_x, state->crtc_y,
			   state->crtc_w, state->crtc_h,
			   (state->src_x >> 16),
			   ((state->src_x & 0xffff) * 15625) >> 10,
			   (state->src_y >> 16),
			   ((state->src_y & 0xffff) * 15625) >> 10,
			   (state->src_w >> 16),
			   ((state->src_w & 0xffff) * 15625) >> 10,
			   (state->src_h >> 16),
			   ((state->src_h & 0xffff) * 15625) >> 10,
			   format_name.str,
			   rot_str);
	}
}
2991
2992static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2993{
2994 struct intel_crtc_state *pipe_config;
2995 int num_scalers = intel_crtc->num_scalers;
2996 int i;
2997
2998 pipe_config = to_intel_crtc_state(intel_crtc->base.state);
2999
3000 /* Not all platformas have a scaler */
3001 if (num_scalers) {
3002 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
3003 num_scalers,
3004 pipe_config->scaler_state.scaler_users,
3005 pipe_config->scaler_state.scaler_id);
3006
A.Sunil Kamath58415912016-11-20 23:20:26 +05303007 for (i = 0; i < num_scalers; i++) {
Robert Fekete3abc4e02015-10-27 16:58:32 +01003008 struct intel_scaler *sc =
3009 &pipe_config->scaler_state.scalers[i];
3010
3011 seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
3012 i, yesno(sc->in_use), sc->mode);
3013 }
3014 seq_puts(m, "\n");
3015 } else {
3016 seq_puts(m, "\tNo scalers available on this platform\n");
3017 }
3018}
3019
/*
 * debugfs entry: full display state dump -- one section per CRTC (mode,
 * cursor, scalers, planes, underrun reporting) followed by one section
 * per connector.  Always returns 0.
 */
static int i915_display_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "CRTC info\n");
	seq_printf(m, "---------\n");
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *pipe_config;

		/* Lock each CRTC so its state can't change while we print. */
		drm_modeset_lock(&crtc->base.mutex, NULL);
		pipe_config = to_intel_crtc_state(crtc->base.state);

		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
			   crtc->base.base.id, pipe_name(crtc->pipe),
			   yesno(pipe_config->base.active),
			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
			   yesno(pipe_config->dither), pipe_config->pipe_bpp);

		/* Cursor/scaler/plane details only for active pipes. */
		if (pipe_config->base.active) {
			struct intel_plane *cursor =
				to_intel_plane(crtc->base.cursor);

			intel_crtc_info(m, crtc);

			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
				   yesno(cursor->base.state->visible),
				   cursor->base.state->crtc_x,
				   cursor->base.state->crtc_y,
				   cursor->base.state->crtc_w,
				   cursor->base.state->crtc_h,
				   cursor->cursor.base);
			intel_scaler_info(m, crtc);
			intel_plane_info(m, crtc);
		}

		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
			   yesno(!crtc->cpu_fifo_underrun_disabled),
			   yesno(!crtc->pch_fifo_underrun_disabled));
		drm_modeset_unlock(&crtc->base.mutex);
	}

	seq_printf(m, "\n");
	seq_printf(m, "Connector info\n");
	seq_printf(m, "--------------\n");
	/* The connector list walk is protected by mode_config.mutex. */
	mutex_lock(&dev->mode_config.mutex);
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter)
		intel_connector_info(m, connector);
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
3082
Chris Wilson1b365952016-10-04 21:11:31 +01003083static int i915_engine_info(struct seq_file *m, void *unused)
3084{
3085 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3086 struct intel_engine_cs *engine;
Chris Wilsona0371212019-01-14 14:21:14 +00003087 intel_wakeref_t wakeref;
Akash Goel3b3f1652016-10-13 22:44:48 +05303088 enum intel_engine_id id;
Chris Wilsonf636edb2017-10-09 12:02:57 +01003089 struct drm_printer p;
Chris Wilson1b365952016-10-04 21:11:31 +01003090
Chris Wilsona0371212019-01-14 14:21:14 +00003091 wakeref = intel_runtime_pm_get(dev_priv);
Chris Wilson9c870d02016-10-24 13:42:15 +01003092
Chris Wilson79ffac852019-04-24 21:07:17 +01003093 seq_printf(m, "GT awake? %s [%d]\n",
3094 yesno(dev_priv->gt.awake),
3095 atomic_read(&dev_priv->gt.wakeref.count));
Lionel Landwerlinf577a032017-11-13 23:34:53 +00003096 seq_printf(m, "CS timestamp frequency: %u kHz\n",
Jani Nikula02584042018-12-31 16:56:41 +02003097 RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
Chris Wilsonf73b5672017-03-02 15:03:56 +00003098
Chris Wilsonf636edb2017-10-09 12:02:57 +01003099 p = drm_seq_file_printer(m);
3100 for_each_engine(engine, dev_priv, id)
Chris Wilson0db18b12017-12-08 01:23:00 +00003101 intel_engine_dump(engine, &p, "%s\n", engine->name);
Chris Wilson1b365952016-10-04 21:11:31 +01003102
Chris Wilsona0371212019-01-14 14:21:14 +00003103 intel_runtime_pm_put(dev_priv, wakeref);
Chris Wilson9c870d02016-10-24 13:42:15 +01003104
Chris Wilson1b365952016-10-04 21:11:31 +01003105 return 0;
3106}
3107
Lionel Landwerlin79e9cd52018-03-06 12:28:54 +00003108static int i915_rcs_topology(struct seq_file *m, void *unused)
3109{
3110 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3111 struct drm_printer p = drm_seq_file_printer(m);
3112
Jani Nikula02584042018-12-31 16:56:41 +02003113 intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
Lionel Landwerlin79e9cd52018-03-06 12:28:54 +00003114
3115 return 0;
3116}
3117
Chris Wilsonc5418a82017-10-13 21:26:19 +01003118static int i915_shrinker_info(struct seq_file *m, void *unused)
3119{
3120 struct drm_i915_private *i915 = node_to_i915(m->private);
3121
3122 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3123 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3124
3125 return 0;
3126}
3127
/*
 * debugfs entry: for every shared DPLL print which CRTCs use it, whether
 * it is on, and the software-tracked hardware register state (including
 * the MG PHY PLL registers).  Always returns 0.
 */
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i;

	/* Hold all modeset locks so the DPLL state can't change under us. */
	drm_modeset_lock_all(dev);
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
			   pll->info->id);
		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
		seq_printf(m, " tracked hardware state:\n");
		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
		seq_printf(m, " dpll_md: 0x%08x\n",
			   pll->state.hw_state.dpll_md);
		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
			   pll->state.hw_state.mg_refclkin_ctl);
		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
			   pll->state.hw_state.mg_clktop2_coreclkctl1);
		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
			   pll->state.hw_state.mg_clktop2_hsclkctl);
		seq_printf(m, " mg_pll_div0:  0x%08x\n",
			   pll->state.hw_state.mg_pll_div0);
		seq_printf(m, " mg_pll_div1:  0x%08x\n",
			   pll->state.hw_state.mg_pll_div1);
		seq_printf(m, " mg_pll_lf:    0x%08x\n",
			   pll->state.hw_state.mg_pll_lf);
		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
			   pll->state.hw_state.mg_pll_frac_lock);
		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
			   pll->state.hw_state.mg_pll_ssc);
		seq_printf(m, " mg_pll_bias:  0x%08x\n",
			   pll->state.hw_state.mg_pll_bias);
		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
	}
	drm_modeset_unlock_all(dev);

	return 0;
}
3176
Damien Lespiau1ed1ef92014-08-30 16:50:59 +01003177static int i915_wa_registers(struct seq_file *m, void *unused)
Arun Siluvery888b5992014-08-26 14:44:51 +01003178{
Tvrtko Ursulin452420d2018-12-03 13:33:57 +00003179 struct drm_i915_private *i915 = node_to_i915(m->private);
Chris Wilson8a68d462019-03-05 18:03:30 +00003180 const struct i915_wa_list *wal = &i915->engine[RCS0]->ctx_wa_list;
Tvrtko Ursulin452420d2018-12-03 13:33:57 +00003181 struct i915_wa *wa;
3182 unsigned int i;
Arun Siluvery888b5992014-08-26 14:44:51 +01003183
Tvrtko Ursulin452420d2018-12-03 13:33:57 +00003184 seq_printf(m, "Workarounds applied: %u\n", wal->count);
3185 for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
Chris Wilson548764b2018-06-15 13:02:07 +01003186 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
Tvrtko Ursulin452420d2018-12-03 13:33:57 +00003187 i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);
Arun Siluvery888b5992014-08-26 14:44:51 +01003188
3189 return 0;
3190}
3191
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05303192static int i915_ipc_status_show(struct seq_file *m, void *data)
3193{
3194 struct drm_i915_private *dev_priv = m->private;
3195
3196 seq_printf(m, "Isochronous Priority Control: %s\n",
3197 yesno(dev_priv->ipc_enabled));
3198 return 0;
3199}
3200
3201static int i915_ipc_status_open(struct inode *inode, struct file *file)
3202{
3203 struct drm_i915_private *dev_priv = inode->i_private;
3204
3205 if (!HAS_IPC(dev_priv))
3206 return -ENODEV;
3207
3208 return single_open(file, i915_ipc_status_show, dev_priv);
3209}
3210
/*
 * Write handler for i915_ipc_status: parse a boolean from userspace and
 * enable/disable Isochronous Priority Control accordingly.
 *
 * Returns the number of bytes consumed on success, or a negative errno
 * if the input cannot be parsed as a boolean.
 */
static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	intel_wakeref_t wakeref;
	bool enable;
	int ret;

	ret = kstrtobool_from_user(ubuf, len, &enable);
	if (ret < 0)
		return ret;

	/* Hold a runtime-pm wakeref for the duration of the HW update. */
	with_intel_runtime_pm(dev_priv, wakeref) {
		if (!dev_priv->ipc_enabled && enable)
			DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
		/*
		 * Watermarks inherited from the BIOS can no longer be
		 * trusted once IPC is toggled; force a recomputation.
		 */
		dev_priv->wm.distrust_bios_wm = true;
		dev_priv->ipc_enabled = enable;
		intel_enable_ipc(dev_priv);
	}

	return len;
}
3234
/* debugfs file operations for i915_ipc_status (seq read + boolean write). */
static const struct file_operations i915_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = i915_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_ipc_status_write
};
3243
Damien Lespiauc5511e42014-11-04 17:06:51 +00003244static int i915_ddb_info(struct seq_file *m, void *unused)
3245{
David Weinehall36cdd012016-08-22 13:59:31 +03003246 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3247 struct drm_device *dev = &dev_priv->drm;
Damien Lespiauc5511e42014-11-04 17:06:51 +00003248 struct skl_ddb_entry *entry;
Ville Syrjäläff43bc32018-11-27 18:59:00 +02003249 struct intel_crtc *crtc;
Damien Lespiauc5511e42014-11-04 17:06:51 +00003250
David Weinehall36cdd012016-08-22 13:59:31 +03003251 if (INTEL_GEN(dev_priv) < 9)
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00003252 return -ENODEV;
Damien Lespiau2fcffe12014-12-03 17:33:24 +00003253
Damien Lespiauc5511e42014-11-04 17:06:51 +00003254 drm_modeset_lock_all(dev);
3255
Damien Lespiauc5511e42014-11-04 17:06:51 +00003256 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3257
Ville Syrjäläff43bc32018-11-27 18:59:00 +02003258 for_each_intel_crtc(&dev_priv->drm, crtc) {
3259 struct intel_crtc_state *crtc_state =
3260 to_intel_crtc_state(crtc->base.state);
3261 enum pipe pipe = crtc->pipe;
3262 enum plane_id plane_id;
3263
Damien Lespiauc5511e42014-11-04 17:06:51 +00003264 seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3265
Ville Syrjäläff43bc32018-11-27 18:59:00 +02003266 for_each_plane_id_on_crtc(crtc, plane_id) {
3267 entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
3268 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
Damien Lespiauc5511e42014-11-04 17:06:51 +00003269 entry->start, entry->end,
3270 skl_ddb_entry_size(entry));
3271 }
3272
Ville Syrjäläff43bc32018-11-27 18:59:00 +02003273 entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
Damien Lespiauc5511e42014-11-04 17:06:51 +00003274 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
3275 entry->end, skl_ddb_entry_size(entry));
3276 }
3277
3278 drm_modeset_unlock_all(dev);
3279
3280 return 0;
3281}
3282
Vandana Kannana54746e2015-03-03 20:53:10 +05303283static void drrs_status_per_crtc(struct seq_file *m,
David Weinehall36cdd012016-08-22 13:59:31 +03003284 struct drm_device *dev,
3285 struct intel_crtc *intel_crtc)
Vandana Kannana54746e2015-03-03 20:53:10 +05303286{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003287 struct drm_i915_private *dev_priv = to_i915(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303288 struct i915_drrs *drrs = &dev_priv->drrs;
3289 int vrefresh = 0;
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003290 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003291 struct drm_connector_list_iter conn_iter;
Vandana Kannana54746e2015-03-03 20:53:10 +05303292
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003293 drm_connector_list_iter_begin(dev, &conn_iter);
3294 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003295 if (connector->state->crtc != &intel_crtc->base)
3296 continue;
3297
3298 seq_printf(m, "%s:\n", connector->name);
Vandana Kannana54746e2015-03-03 20:53:10 +05303299 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003300 drm_connector_list_iter_end(&conn_iter);
Vandana Kannana54746e2015-03-03 20:53:10 +05303301
3302 if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
3303 seq_puts(m, "\tVBT: DRRS_type: Static");
3304 else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
3305 seq_puts(m, "\tVBT: DRRS_type: Seamless");
3306 else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
3307 seq_puts(m, "\tVBT: DRRS_type: None");
3308 else
3309 seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
3310
3311 seq_puts(m, "\n\n");
3312
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003313 if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
Vandana Kannana54746e2015-03-03 20:53:10 +05303314 struct intel_panel *panel;
3315
3316 mutex_lock(&drrs->mutex);
3317 /* DRRS Supported */
3318 seq_puts(m, "\tDRRS Supported: Yes\n");
3319
3320 /* disable_drrs() will make drrs->dp NULL */
3321 if (!drrs->dp) {
C, Ramalingamce6e2132017-11-20 09:53:47 +05303322 seq_puts(m, "Idleness DRRS: Disabled\n");
3323 if (dev_priv->psr.enabled)
3324 seq_puts(m,
3325 "\tAs PSR is enabled, DRRS is not enabled\n");
Vandana Kannana54746e2015-03-03 20:53:10 +05303326 mutex_unlock(&drrs->mutex);
3327 return;
3328 }
3329
3330 panel = &drrs->dp->attached_connector->panel;
3331 seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
3332 drrs->busy_frontbuffer_bits);
3333
3334 seq_puts(m, "\n\t\t");
3335 if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
3336 seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
3337 vrefresh = panel->fixed_mode->vrefresh;
3338 } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
3339 seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
3340 vrefresh = panel->downclock_mode->vrefresh;
3341 } else {
3342 seq_printf(m, "DRRS_State: Unknown(%d)\n",
3343 drrs->refresh_rate_type);
3344 mutex_unlock(&drrs->mutex);
3345 return;
3346 }
3347 seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3348
3349 seq_puts(m, "\n\t\t");
3350 mutex_unlock(&drrs->mutex);
3351 } else {
3352 /* DRRS not supported. Print the VBT parameter*/
3353 seq_puts(m, "\tDRRS Supported : No");
3354 }
3355 seq_puts(m, "\n");
3356}
3357
/*
 * Top-level DRRS debugfs entry: walk every active crtc and print its
 * per-crtc DRRS status; report when no crtc is active at all.
 */
static int i915_drrs_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *intel_crtc;
	int active_crtc_cnt = 0;

	/* Hold modeset locks so crtc/connector state is stable while dumping. */
	drm_modeset_lock_all(dev);
	for_each_intel_crtc(dev, intel_crtc) {
		if (intel_crtc->base.state->active) {
			active_crtc_cnt++;
			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);

			drrs_status_per_crtc(m, dev, intel_crtc);
		}
	}
	drm_modeset_unlock_all(dev);

	if (!active_crtc_cnt)
		seq_puts(m, "No active crtc found\n");

	return 0;
}
3381
Dave Airlie11bed952014-05-12 15:22:27 +10003382static int i915_dp_mst_info(struct seq_file *m, void *unused)
3383{
David Weinehall36cdd012016-08-22 13:59:31 +03003384 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3385 struct drm_device *dev = &dev_priv->drm;
Dave Airlie11bed952014-05-12 15:22:27 +10003386 struct intel_encoder *intel_encoder;
3387 struct intel_digital_port *intel_dig_port;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003388 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003389 struct drm_connector_list_iter conn_iter;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003390
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003391 drm_connector_list_iter_begin(dev, &conn_iter);
3392 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003393 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
Dave Airlie11bed952014-05-12 15:22:27 +10003394 continue;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003395
3396 intel_encoder = intel_attached_encoder(connector);
3397 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3398 continue;
3399
3400 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
Dave Airlie11bed952014-05-12 15:22:27 +10003401 if (!intel_dig_port->dp.can_mst)
3402 continue;
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02003403
Jim Bride40ae80c2016-04-14 10:18:37 -07003404 seq_printf(m, "MST Source Port %c\n",
Ville Syrjälä8f4f2792017-11-09 17:24:34 +02003405 port_name(intel_dig_port->base.port));
Dave Airlie11bed952014-05-12 15:22:27 +10003406 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3407 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003408 drm_connector_list_iter_end(&conn_iter);
3409
Dave Airlie11bed952014-05-12 15:22:27 +10003410 return 0;
3411}
3412
Todd Previteeb3394fa2015-04-18 00:04:19 -07003413static ssize_t i915_displayport_test_active_write(struct file *file,
David Weinehall36cdd012016-08-22 13:59:31 +03003414 const char __user *ubuf,
3415 size_t len, loff_t *offp)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003416{
3417 char *input_buffer;
3418 int status = 0;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003419 struct drm_device *dev;
3420 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003421 struct drm_connector_list_iter conn_iter;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003422 struct intel_dp *intel_dp;
3423 int val = 0;
3424
Sudip Mukherjee9aaffa32015-07-21 17:36:45 +05303425 dev = ((struct seq_file *)file->private_data)->private;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003426
Todd Previteeb3394fa2015-04-18 00:04:19 -07003427 if (len == 0)
3428 return 0;
3429
Geliang Tang261aeba2017-05-06 23:40:17 +08003430 input_buffer = memdup_user_nul(ubuf, len);
3431 if (IS_ERR(input_buffer))
3432 return PTR_ERR(input_buffer);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003433
Todd Previteeb3394fa2015-04-18 00:04:19 -07003434 DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
3435
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003436 drm_connector_list_iter_begin(dev, &conn_iter);
3437 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003438 struct intel_encoder *encoder;
3439
Todd Previteeb3394fa2015-04-18 00:04:19 -07003440 if (connector->connector_type !=
3441 DRM_MODE_CONNECTOR_DisplayPort)
3442 continue;
3443
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003444 encoder = to_intel_encoder(connector->encoder);
3445 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3446 continue;
3447
3448 if (encoder && connector->status == connector_status_connected) {
3449 intel_dp = enc_to_intel_dp(&encoder->base);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003450 status = kstrtoint(input_buffer, 10, &val);
3451 if (status < 0)
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003452 break;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003453 DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3454 /* To prevent erroneous activation of the compliance
3455 * testing code, only accept an actual value of 1 here
3456 */
3457 if (val == 1)
Manasi Navarec1617ab2016-12-09 16:22:50 -08003458 intel_dp->compliance.test_active = 1;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003459 else
Manasi Navarec1617ab2016-12-09 16:22:50 -08003460 intel_dp->compliance.test_active = 0;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003461 }
3462 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003463 drm_connector_list_iter_end(&conn_iter);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003464 kfree(input_buffer);
3465 if (status < 0)
3466 return status;
3467
3468 *offp += len;
3469 return len;
3470}
3471
/*
 * Show handler for the DP compliance "test active" file: prints "1" or
 * "0" per connected DP connector depending on compliance.test_active,
 * and "0" for DP connectors that are not connected.
 */
static int i915_displayport_test_active_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* Skip per-stream MST encoders; compliance lives on the port. */
		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			if (intel_dp->compliance.test_active)
				seq_puts(m, "1");
			else
				seq_puts(m, "0");
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
3505
3506static int i915_displayport_test_active_open(struct inode *inode,
David Weinehall36cdd012016-08-22 13:59:31 +03003507 struct file *file)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003508{
David Weinehall36cdd012016-08-22 13:59:31 +03003509 return single_open(file, i915_displayport_test_active_show,
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003510 inode->i_private);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003511}
3512
/* debugfs file operations for the DP compliance "test active" file. */
static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};
3521
/*
 * Show handler for DP compliance test data: per connected DP connector,
 * dump the data matching the pending test type (EDID hash for EDID-read
 * tests, geometry/bpc for video-pattern tests); "0" otherwise.
 */
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* Skip per-stream MST encoders; compliance lives on the port. */
		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			if (intel_dp->compliance.test_type ==
			    DP_TEST_LINK_EDID_READ)
				seq_printf(m, "%lx",
					   intel_dp->compliance.test_data.edid);
			else if (intel_dp->compliance.test_type ==
				 DP_TEST_LINK_VIDEO_PATTERN) {
				seq_printf(m, "hdisplay: %d\n",
					   intel_dp->compliance.test_data.hdisplay);
				seq_printf(m, "vdisplay: %d\n",
					   intel_dp->compliance.test_data.vdisplay);
				seq_printf(m, "bpc: %u\n",
					   intel_dp->compliance.test_data.bpc);
			}
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003565
/*
 * Show handler for the DP compliance test type: print the pending
 * compliance.test_type (hex) per connected DP connector, "0" otherwise.
 */
static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* Skip per-stream MST encoders; compliance lives on the port. */
		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003597
Jani Nikulae5315212019-01-16 11:15:23 +02003598static void wm_latency_show(struct seq_file *m, const u16 wm[8])
Ville Syrjälä369a1342014-01-22 14:36:08 +02003599{
David Weinehall36cdd012016-08-22 13:59:31 +03003600 struct drm_i915_private *dev_priv = m->private;
3601 struct drm_device *dev = &dev_priv->drm;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003602 int level;
Ville Syrjäläde38b952015-06-24 22:00:09 +03003603 int num_levels;
3604
David Weinehall36cdd012016-08-22 13:59:31 +03003605 if (IS_CHERRYVIEW(dev_priv))
Ville Syrjäläde38b952015-06-24 22:00:09 +03003606 num_levels = 3;
David Weinehall36cdd012016-08-22 13:59:31 +03003607 else if (IS_VALLEYVIEW(dev_priv))
Ville Syrjäläde38b952015-06-24 22:00:09 +03003608 num_levels = 1;
Ville Syrjälä04548cb2017-04-21 21:14:29 +03003609 else if (IS_G4X(dev_priv))
3610 num_levels = 3;
Ville Syrjäläde38b952015-06-24 22:00:09 +03003611 else
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01003612 num_levels = ilk_wm_max_level(dev_priv) + 1;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003613
3614 drm_modeset_lock_all(dev);
3615
3616 for (level = 0; level < num_levels; level++) {
3617 unsigned int latency = wm[level];
3618
Damien Lespiau97e94b22014-11-04 17:06:50 +00003619 /*
3620 * - WM1+ latency values in 0.5us units
Ville Syrjäläde38b952015-06-24 22:00:09 +03003621 * - latencies are in us on gen9/vlv/chv
Damien Lespiau97e94b22014-11-04 17:06:50 +00003622 */
Ville Syrjälä04548cb2017-04-21 21:14:29 +03003623 if (INTEL_GEN(dev_priv) >= 9 ||
3624 IS_VALLEYVIEW(dev_priv) ||
3625 IS_CHERRYVIEW(dev_priv) ||
3626 IS_G4X(dev_priv))
Damien Lespiau97e94b22014-11-04 17:06:50 +00003627 latency *= 10;
3628 else if (level > 0)
Ville Syrjälä369a1342014-01-22 14:36:08 +02003629 latency *= 5;
3630
3631 seq_printf(m, "WM%d %u (%u.%u usec)\n",
Damien Lespiau97e94b22014-11-04 17:06:50 +00003632 level, wm[level], latency / 10, latency % 10);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003633 }
3634
3635 drm_modeset_unlock_all(dev);
3636}
3637
3638static int pri_wm_latency_show(struct seq_file *m, void *data)
3639{
David Weinehall36cdd012016-08-22 13:59:31 +03003640 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003641 const u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003642
David Weinehall36cdd012016-08-22 13:59:31 +03003643 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003644 latencies = dev_priv->wm.skl_latency;
3645 else
David Weinehall36cdd012016-08-22 13:59:31 +03003646 latencies = dev_priv->wm.pri_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003647
3648 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003649
3650 return 0;
3651}
3652
3653static int spr_wm_latency_show(struct seq_file *m, void *data)
3654{
David Weinehall36cdd012016-08-22 13:59:31 +03003655 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003656 const u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003657
David Weinehall36cdd012016-08-22 13:59:31 +03003658 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003659 latencies = dev_priv->wm.skl_latency;
3660 else
David Weinehall36cdd012016-08-22 13:59:31 +03003661 latencies = dev_priv->wm.spr_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003662
3663 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003664
3665 return 0;
3666}
3667
3668static int cur_wm_latency_show(struct seq_file *m, void *data)
3669{
David Weinehall36cdd012016-08-22 13:59:31 +03003670 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003671 const u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003672
David Weinehall36cdd012016-08-22 13:59:31 +03003673 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003674 latencies = dev_priv->wm.skl_latency;
3675 else
David Weinehall36cdd012016-08-22 13:59:31 +03003676 latencies = dev_priv->wm.cur_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003677
3678 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003679
3680 return 0;
3681}
3682
3683static int pri_wm_latency_open(struct inode *inode, struct file *file)
3684{
David Weinehall36cdd012016-08-22 13:59:31 +03003685 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003686
Ville Syrjälä04548cb2017-04-21 21:14:29 +03003687 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003688 return -ENODEV;
3689
David Weinehall36cdd012016-08-22 13:59:31 +03003690 return single_open(file, pri_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003691}
3692
3693static int spr_wm_latency_open(struct inode *inode, struct file *file)
3694{
David Weinehall36cdd012016-08-22 13:59:31 +03003695 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003696
Rodrigo Vivib2ae3182019-02-04 14:25:38 -08003697 if (HAS_GMCH(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003698 return -ENODEV;
3699
David Weinehall36cdd012016-08-22 13:59:31 +03003700 return single_open(file, spr_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003701}
3702
3703static int cur_wm_latency_open(struct inode *inode, struct file *file)
3704{
David Weinehall36cdd012016-08-22 13:59:31 +03003705 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003706
Rodrigo Vivib2ae3182019-02-04 14:25:38 -08003707 if (HAS_GMCH(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003708 return -ENODEV;
3709
David Weinehall36cdd012016-08-22 13:59:31 +03003710 return single_open(file, cur_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003711}
3712
/*
 * Common write path for the watermark latency files: parse up to eight
 * space-separated u16 values from userspace and store them into the
 * given latency table under the modeset locks.
 *
 * Exactly num_levels values must be supplied (platform dependent);
 * returns len on success, -EINVAL on a short/long/bad input, -EFAULT on
 * a failed user copy.
 */
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, u16 wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	u16 new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	/* Same per-platform level count as wm_latency_show(). */
	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	/* len < sizeof(tmp) was checked above, so the terminator fits. */
	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
3757
3758
3759static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3760 size_t len, loff_t *offp)
3761{
3762 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003763 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003764 u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003765
David Weinehall36cdd012016-08-22 13:59:31 +03003766 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003767 latencies = dev_priv->wm.skl_latency;
3768 else
David Weinehall36cdd012016-08-22 13:59:31 +03003769 latencies = dev_priv->wm.pri_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003770
3771 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003772}
3773
3774static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3775 size_t len, loff_t *offp)
3776{
3777 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003778 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003779 u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003780
David Weinehall36cdd012016-08-22 13:59:31 +03003781 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003782 latencies = dev_priv->wm.skl_latency;
3783 else
David Weinehall36cdd012016-08-22 13:59:31 +03003784 latencies = dev_priv->wm.spr_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003785
3786 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003787}
3788
3789static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3790 size_t len, loff_t *offp)
3791{
3792 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003793 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003794 u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003795
David Weinehall36cdd012016-08-22 13:59:31 +03003796 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003797 latencies = dev_priv->wm.skl_latency;
3798 else
David Weinehall36cdd012016-08-22 13:59:31 +03003799 latencies = dev_priv->wm.cur_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003800
3801 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003802}
3803
/* debugfs file operations for the primary-plane watermark latency table. */
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};
3812
/* debugfs file operations for the sprite-plane watermark latency table. */
static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};
3821
/* debugfs file operations for the cursor-plane watermark latency table. */
static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
3830
Kees Cook647416f2013-03-10 14:10:06 -07003831static int
3832i915_wedged_get(void *data, u64 *val)
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003833{
Chris Wilsonc41166f2019-02-20 14:56:37 +00003834 int ret = i915_terminally_wedged(data);
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003835
Chris Wilsonc41166f2019-02-20 14:56:37 +00003836 switch (ret) {
3837 case -EIO:
3838 *val = 1;
3839 return 0;
3840 case 0:
3841 *val = 0;
3842 return 0;
3843 default:
3844 return ret;
3845 }
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003846}
3847
Kees Cook647416f2013-03-10 14:10:06 -07003848static int
3849i915_wedged_set(void *data, u64 val)
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003850{
Chris Wilson598b6b52017-03-25 13:47:35 +00003851 struct drm_i915_private *i915 = data;
Imre Deakd46c0512014-04-14 20:24:27 +03003852
Chris Wilson15cbf002019-02-08 15:37:06 +00003853 /* Flush any previous reset before applying for a new one */
3854 wait_event(i915->gpu_error.reset_queue,
3855 !test_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags));
Mika Kuoppalab8d24a02015-01-28 17:03:14 +02003856
Chris Wilsonce800752018-03-20 10:04:49 +00003857 i915_handle_error(i915, val, I915_ERROR_CAPTURE,
3858 "Manually set wedged engine mask = %llx", val);
Kees Cook647416f2013-03-10 14:10:06 -07003859 return 0;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003860}
3861
/* i915_wedged: read reports terminal-wedge state, write forces a reset. */
DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003865
/*
 * Bit flags accepted by the i915_gem_drop_caches debugfs interface;
 * DROP_ALL is the union of every supported flag and is what reads of
 * the attribute report.
 */
#define DROP_UNBOUND	BIT(0)	/* shrink objects not bound into the GTT */
#define DROP_BOUND	BIT(1)	/* shrink objects bound into the GTT */
#define DROP_RETIRE	BIT(2)	/* retire completed requests */
#define DROP_ACTIVE	BIT(3)	/* wait for the GPU to idle first */
#define DROP_FREED	BIT(4)	/* drain the deferred-free object list */
#define DROP_SHRINK_ALL	BIT(5)	/* invoke the full shrinker */
#define DROP_IDLE	BIT(6)	/* flush retire/idle workers until asleep */
#define DROP_RESET_ACTIVE	BIT(7)	/* reset if the GPU will not idle */
#define DROP_RESET_SEQNO	BIT(8)
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED	| \
		  DROP_SHRINK_ALL |\
		  DROP_IDLE	| \
		  DROP_RESET_ACTIVE | \
		  DROP_RESET_SEQNO)
Kees Cook647416f2013-03-10 14:10:06 -07003884static int
3885i915_drop_caches_get(void *data, u64 *val)
Chris Wilsondd624af2013-01-15 12:39:35 +00003886{
Kees Cook647416f2013-03-10 14:10:06 -07003887 *val = DROP_ALL;
Chris Wilsondd624af2013-01-15 12:39:35 +00003888
Kees Cook647416f2013-03-10 14:10:06 -07003889 return 0;
Chris Wilsondd624af2013-01-15 12:39:35 +00003890}
3891
/*
 * Writing to i915_gem_drop_caches performs the cache-dropping actions
 * selected by the DROP_* bits in @val, in a fixed order: optional
 * forced wedge, retire/idle under struct_mutex, optional reset of a
 * wedged GPU, shrinker passes inside an fs_reclaim section, worker
 * flushing, and finally draining of freed objects.
 */
static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);

	/* If a reset was requested but the engines refuse to idle within
	 * the timeout, declare the device wedged so the reset below can
	 * proceed. */
	if (val & DROP_RESET_ACTIVE &&
	    wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT))
		i915_gem_set_wedged(i915);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
		int ret;

		ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
		if (ret)
			return ret;

		/* NOTE(review): a failure from i915_gem_wait_for_idle() is
		 * not propagated; the remaining steps run best-effort —
		 * confirm this is intended. */
		if (val & DROP_ACTIVE)
			ret = i915_gem_wait_for_idle(i915,
						     I915_WAIT_INTERRUPTIBLE |
						     I915_WAIT_LOCKED,
						     MAX_SCHEDULE_TIMEOUT);

		if (val & DROP_RETIRE)
			i915_retire_requests(i915);

		mutex_unlock(&i915->drm.struct_mutex);
	}

	/* Only reset once the device has been marked terminally wedged. */
	if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(i915))
		i915_handle_error(i915, ALL_ENGINES, 0, NULL);

	/* Run the shrinker passes inside an fs_reclaim section so lockdep
	 * sees them as reclaim, matching real shrinker invocations. */
	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(i915);
	fs_reclaim_release(GFP_KERNEL);

	/* Keep flushing the retire/idle workers until the GT reports
	 * itself asleep. */
	if (val & DROP_IDLE) {
		do {
			flush_delayed_work(&i915->gem.retire_work);
			drain_delayed_work(&i915->gem.idle_work);
		} while (READ_ONCE(i915->gt.awake));
	}

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(i915);

	return 0;
}
3951
/* i915_gem_drop_caches: read returns DROP_ALL, write drops caches. */
DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
Chris Wilsondd624af2013-01-15 12:39:35 +00003955
Kees Cook647416f2013-03-10 14:10:06 -07003956static int
Kees Cook647416f2013-03-10 14:10:06 -07003957i915_cache_sharing_get(void *data, u64 *val)
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07003958{
David Weinehall36cdd012016-08-22 13:59:31 +03003959 struct drm_i915_private *dev_priv = data;
Chris Wilsona0371212019-01-14 14:21:14 +00003960 intel_wakeref_t wakeref;
Chris Wilsond4225a52019-01-14 14:21:23 +00003961 u32 snpcr = 0;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07003962
Lucas De Marchif3ce44a2018-12-12 10:10:44 -08003963 if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
Daniel Vetter004777c2012-08-09 15:07:01 +02003964 return -ENODEV;
3965
Chris Wilsond4225a52019-01-14 14:21:23 +00003966 with_intel_runtime_pm(dev_priv, wakeref)
3967 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07003968
Kees Cook647416f2013-03-10 14:10:06 -07003969 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07003970
Kees Cook647416f2013-03-10 14:10:06 -07003971 return 0;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07003972}
3973
Kees Cook647416f2013-03-10 14:10:06 -07003974static int
3975i915_cache_sharing_set(void *data, u64 val)
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07003976{
David Weinehall36cdd012016-08-22 13:59:31 +03003977 struct drm_i915_private *dev_priv = data;
Chris Wilsona0371212019-01-14 14:21:14 +00003978 intel_wakeref_t wakeref;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07003979
Lucas De Marchif3ce44a2018-12-12 10:10:44 -08003980 if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
Daniel Vetter004777c2012-08-09 15:07:01 +02003981 return -ENODEV;
3982
Kees Cook647416f2013-03-10 14:10:06 -07003983 if (val > 3)
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07003984 return -EINVAL;
3985
Kees Cook647416f2013-03-10 14:10:06 -07003986 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
Chris Wilsond4225a52019-01-14 14:21:23 +00003987 with_intel_runtime_pm(dev_priv, wakeref) {
3988 u32 snpcr;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07003989
Chris Wilsond4225a52019-01-14 14:21:23 +00003990 /* Update the cache sharing policy here as well */
3991 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
3992 snpcr &= ~GEN6_MBC_SNPCR_MASK;
3993 snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
3994 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
3995 }
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07003996
Kees Cook647416f2013-03-10 14:10:06 -07003997 return 0;
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07003998}
3999
/* i915_cache_sharing: read/write the gen6/7 uncore snoop policy (0-3). */
DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004003
/*
 * Decode the CHV power-gating signal registers into @sseu: for each of
 * the two subslices, skip power-gated ones and count the enabled EUs
 * from the per-EU-pair PG_ENABLE bits (each clear bit contributes 2 EUs).
 */
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
#define SS_MAX 2
	const int ss_max = SS_MAX;
	u32 sig1[SS_MAX], sig2[SS_MAX];
	int ss;

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		/* CHV has a single slice. */
		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] |= BIT(ss);
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
#undef SS_MAX
}
4036
/*
 * Decode the gen10 slice/subslice power-gate ACK registers into @sseu:
 * record which slices acked, take the subslice mask from the static
 * runtime info, and count enabled EUs from the per-subslice EU ACK bits
 * (each set ACK bit represents a pair of EUs).
 */
static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		/*
		 * FIXME: Valid SS Mask respects the spec and read
		 * only valid bits for those registers, excluding reserved
		 * although this seems wrong because it would leave many
		 * subslices without ACK.
		 */
		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		sseu->subslice_mask[s] = info->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			/* Each set ACK bit stands for two enabled EUs. */
			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4092
/*
 * Decode the gen9 slice/subslice power-gate ACK registers into @sseu.
 * On gen9 LP (Broxton-class) parts the subslice mask is derived from
 * the per-subslice ACK bits; on gen9 BC (big-core) parts it is taken
 * from the static runtime info.  EU counts come from the EU ACK bits,
 * two EUs per set bit.
 */
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		if (IS_GEN9_BC(dev_priv))
			sseu->subslice_mask[s] =
				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask[s] |= BIT(ss);
			}

			/* Each set ACK bit stands for two enabled EUs. */
			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4148
/*
 * Fill @sseu for Broadwell: the enabled slice mask comes from the
 * GEN8_GT_SLICE_INFO register, subslice masks and EU-per-subslice from
 * the static runtime info, and the EU total is corrected by subtracting
 * the fused-off EUs recorded in subslice_7eu for each enabled slice.
 */
static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
					 struct sseu_dev_info *sseu)
{
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->eu_per_subslice =
			RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			sseu->subslice_mask[s] =
				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
		}
		sseu->eu_total = sseu->eu_per_subslice *
				 sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu =
				RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}
4176
/*
 * Print one sseu_dev_info block to the seq_file.  @is_available_info
 * selects the "Available" (static capability) vs "Enabled" (current
 * hardware state) label; capability-only fields (pooled EU, power-gating
 * support) are printed only for the "Available" variant.
 */
static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
				 const struct sseu_dev_info *sseu)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const char *type = is_available_info ? "Available" : "Enabled";
	int s;

	seq_printf(m, "  %s Slice Mask: %04x\n", type,
		   sseu->slice_mask);
	seq_printf(m, "  %s Slice Total: %u\n", type,
		   hweight8(sseu->slice_mask));
	seq_printf(m, "  %s Subslice Total: %u\n", type,
		   sseu_subslice_total(sseu));
	for (s = 0; s < fls(sseu->slice_mask); s++) {
		seq_printf(m, "  %s Slice%i subslices: %u\n", type,
			   s, hweight8(sseu->subslice_mask[s]));
	}
	seq_printf(m, "  %s EU Total: %u\n", type,
		   sseu->eu_total);
	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
		   sseu->eu_per_subslice);

	if (!is_available_info)
		return;

	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
	if (HAS_POOLED_EU(dev_priv))
		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);

	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(sseu->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(sseu->has_eu_pg));
}
4213
/*
 * i915_sseu_status debugfs entry (gen8+): print the static SSEU
 * capabilities, then probe the current hardware state with the
 * platform-specific decoder (under a runtime-pm wakeref) and print it.
 */
static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct sseu_dev_info sseu;
	intel_wakeref_t wakeref;

	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));
	/* Carry over the static limits before probing the live state. */
	sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
	sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
	sseu.max_eus_per_subslice =
		RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;

	with_intel_runtime_pm(dev_priv, wakeref) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_sseu_device_status(dev_priv, &sseu);
		else if (IS_BROADWELL(dev_priv))
			broadwell_sseu_device_status(dev_priv, &sseu);
		else if (IS_GEN(dev_priv, 9))
			gen9_sseu_device_status(dev_priv, &sseu);
		else if (INTEL_GEN(dev_priv) >= 10)
			gen10_sseu_device_status(dev_priv, &sseu);
	}

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}
4248
/*
 * Opening i915_forcewake_user grabs a runtime-pm wakeref and user
 * forcewake so registers stay awake while the file is held open.  The
 * wakeref cookie is stashed in file->private_data (as a casted pointer)
 * for release time.  A no-op before gen6, which has no forcewake.
 */
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	if (INTEL_GEN(i915) < 6)
		return 0;

	file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915);
	intel_uncore_forcewake_user_get(&i915->uncore);

	return 0;
}
4261
/*
 * Counterpart of i915_forcewake_open: drop the user forcewake and
 * release the runtime-pm wakeref that was stashed in
 * file->private_data at open time.
 */
static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	if (INTEL_GEN(i915) < 6)
		return 0;

	intel_uncore_forcewake_user_put(&i915->uncore);
	intel_runtime_pm_put(i915,
			     (intel_wakeref_t)(uintptr_t)file->private_data);

	return 0;
}
4275
/* i915_forcewake_user: holding the file open holds forcewake. */
static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
4281
/*
 * Show the HPD storm threshold and whether a storm is currently being
 * handled (i.e. the re-enable work is pending).
 */
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;

	/* Synchronize with everything first in case there's been an HPD
	 * storm, but we haven't finished handling it in the kernel yet
	 */
	synchronize_irq(dev_priv->drm.irq);
	flush_work(&dev_priv->hotplug.dig_port_work);
	flush_work(&dev_priv->hotplug.hotplug_work);

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   yesno(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}
4300
/*
 * Set the HPD storm detection threshold.  Accepts either a decimal
 * count or the literal "reset" (restores HPD_STORM_DEFAULT_THRESHOLD);
 * a value of 0 disables storm detection.  The per-pin storm counters
 * are cleared under irq_lock so the new threshold starts fresh, and the
 * re-enable work is flushed in case we were mid-storm.
 */
static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
			      new_threshold);
	else
		DRM_DEBUG_KMS("Disabling HPD storm detection\n");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}
4349
/* seq_file open hook for the i915_hpd_storm_ctl attribute. */
static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}
4354
/* i915_hpd_storm_ctl: read shows threshold/state, write sets threshold. */
static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
4363
/* Show whether HPD short-pulse storm detection is enabled. */
static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;

	seq_printf(m, "Enabled: %s\n",
		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));

	return 0;
}
4373
/* seq_file open hook for the i915_hpd_short_storm_ctl attribute. */
static int
i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_short_storm_ctl_show,
			   inode->i_private);
}
4380
/*
 * Enable/disable HPD short-pulse storm detection.  Accepts a boolean
 * (kstrtobool syntax) or the literal "reset", which restores the
 * platform default (enabled only when the device lacks DP-MST).  As
 * with the long-storm control, the per-pin counters are cleared under
 * irq_lock and the re-enable work is flushed.
 */
static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
					      const char __user *ubuf,
					      size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	char *newline;
	char tmp[16];
	int i;
	bool new_state;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	/* Reset to the "default" state for this system */
	if (strcmp(tmp, "reset") == 0)
		new_state = !HAS_DP_MST(dev_priv);
	else if (kstrtobool(tmp, &new_state) != 0)
		return -EINVAL;

	DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
		      new_state ? "En" : "Dis");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_short_storm_enabled = new_state;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}
4427
/* i915_hpd_short_storm_ctl: toggle short-pulse HPD storm detection. */
static const struct file_operations i915_hpd_short_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_short_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_short_storm_ctl_write,
};
4436
/*
 * Manually enable (@val != 0) or disable (@val == 0) DRRS on every
 * active eDP output (gen7+).  For each CRTC we take its modeset lock,
 * skip it unless its state is active with DRRS support, wait for any
 * pending commit's hw_done so we do not race the hardware programming,
 * and then walk the connectors bound to the CRTC looking for eDP
 * encoders to toggle.
 */
static int i915_drrs_ctl_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 7)
		return -ENODEV;

	for_each_intel_crtc(dev, crtc) {
		struct drm_connector_list_iter conn_iter;
		struct intel_crtc_state *crtc_state;
		struct drm_connector *connector;
		struct drm_crtc_commit *commit;
		int ret;

		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		if (!crtc_state->base.active ||
		    !crtc_state->has_drrs)
			goto out;

		commit = crtc_state->base.commit;
		if (commit) {
			/* Wait for the commit to reach the hardware before
			 * poking at DRRS state. */
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				goto out;
		}

		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct intel_encoder *encoder;
			struct intel_dp *intel_dp;

			/* Only connectors driven by this CRTC. */
			if (!(crtc_state->base.connector_mask &
			      drm_connector_mask(connector)))
				continue;

			encoder = intel_attached_encoder(connector);
			if (encoder->type != INTEL_OUTPUT_EDP)
				continue;

			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
						val ? "en" : "dis", val);

			intel_dp = enc_to_intel_dp(&encoder->base);
			if (val)
				intel_edp_drrs_enable(intel_dp,
						      crtc_state);
			else
				intel_edp_drrs_disable(intel_dp,
						       crtc_state);
		}
		drm_connector_list_iter_end(&conn_iter);

out:
		/* Unlock before checking ret so the mutex is always dropped. */
		drm_modeset_unlock(&crtc->base.mutex);
		if (ret)
			return ret;
	}

	return 0;
}
4504
/* i915_drrs_ctl: write-only toggle for DRRS on active eDP outputs. */
DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4506
/*
 * Writing a truthy value re-arms FIFO underrun reporting on every CRTC
 * (reporting is disabled after the first underrun to avoid log spam).
 * Each CRTC is handled under its own modeset lock, waiting for any
 * pending commit to complete (hw_done then flip_done) before re-arming;
 * finally the FBC underrun status is reset as well.
 */
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->base.active) {
			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
				      pipe_name(intel_crtc->pipe));

			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}

		drm_modeset_unlock(&intel_crtc->base.mutex);

		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}
4560
/* i915_fifo_underrun_reset: write-only trigger to re-arm underrun irqs. */
static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};
4567
/*
 * Read-only informational debugfs entries, registered in one go through
 * drm_debugfs_create_files() from i915_debugfs_register(). The optional
 * fourth member is passed to the show callback as driver-private data
 * (used to select the error log for i915_guc_load_err_log_dump).
 */
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_reset_info", i915_reset_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
/* Number of entries handed to drm_debugfs_create_files(). */
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
Ben Gamari20172632009-02-17 20:08:50 -05004617
/*
 * Writable debugfs entries with dedicated file_operations; each is created
 * individually (mode 0644) by i915_debugfs_register().
 */
static const struct i915_debugfs_files {
	const char *name;	/* debugfs file name */
	const struct file_operations *fops;	/* backing fops */
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};
4645
Chris Wilson1dac8912016-06-24 14:00:17 +01004646int i915_debugfs_register(struct drm_i915_private *dev_priv)
Ben Gamari20172632009-02-17 20:08:50 -05004647{
Chris Wilson91c8a322016-07-05 10:40:23 +01004648 struct drm_minor *minor = dev_priv->drm.primary;
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004649 struct dentry *ent;
Maarten Lankhorst6cc42152018-06-28 09:23:02 +02004650 int i;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004651
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004652 ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4653 minor->debugfs_root, to_i915(minor->dev),
4654 &i915_forcewake_fops);
4655 if (!ent)
4656 return -ENOMEM;
Daniel Vetter6a9c3082011-12-14 13:57:11 +01004657
Daniel Vetter34b96742013-07-04 20:49:44 +02004658 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004659 ent = debugfs_create_file(i915_debugfs_files[i].name,
4660 S_IRUGO | S_IWUSR,
4661 minor->debugfs_root,
4662 to_i915(minor->dev),
Daniel Vetter34b96742013-07-04 20:49:44 +02004663 i915_debugfs_files[i].fops);
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004664 if (!ent)
4665 return -ENOMEM;
Daniel Vetter34b96742013-07-04 20:49:44 +02004666 }
Mika Kuoppala40633212012-12-04 15:12:00 +02004667
Ben Gamari27c202a2009-07-01 22:26:52 -04004668 return drm_debugfs_create_files(i915_debugfs_list,
4669 I915_DEBUGFS_ENTRIES,
Ben Gamari20172632009-02-17 20:08:50 -05004670 minor->debugfs_root, minor);
4671}
4672
/* Describes one contiguous range of DPCD registers dumped by i915_dpcd_show(). */
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};
4683
/*
 * DPCD register ranges dumped through the per-connector i915_dpcd debugfs
 * file. Entries marked .edp are skipped for non-eDP connectors; each range
 * must fit in the 16-byte read buffer of i915_dpcd_show().
 */
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
4696
4697static int i915_dpcd_show(struct seq_file *m, void *data)
4698{
4699 struct drm_connector *connector = m->private;
4700 struct intel_dp *intel_dp =
4701 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
Jani Nikulae5315212019-01-16 11:15:23 +02004702 u8 buf[16];
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004703 ssize_t err;
4704 int i;
4705
Mika Kuoppala5c1a8872015-05-15 13:09:21 +03004706 if (connector->status != connector_status_connected)
4707 return -ENODEV;
4708
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004709 for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4710 const struct dpcd_block *b = &i915_dpcd_debug[i];
4711 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4712
4713 if (b->edp &&
4714 connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4715 continue;
4716
4717 /* low tech for now */
4718 if (WARN_ON(size > sizeof(buf)))
4719 continue;
4720
4721 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
Chris Wilson65404c82018-10-10 09:17:06 +01004722 if (err < 0)
4723 seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
4724 else
4725 seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
kbuild test robotb3f9d7d2015-04-16 18:34:06 +08004726 }
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004727
4728 return 0;
4729}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02004730DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004731
David Weinehallecbd6782016-08-23 12:23:56 +03004732static int i915_panel_show(struct seq_file *m, void *data)
4733{
4734 struct drm_connector *connector = m->private;
4735 struct intel_dp *intel_dp =
4736 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4737
4738 if (connector->status != connector_status_connected)
4739 return -ENODEV;
4740
4741 seq_printf(m, "Panel power up delay: %d\n",
4742 intel_dp->panel_power_up_delay);
4743 seq_printf(m, "Panel power down delay: %d\n",
4744 intel_dp->panel_power_down_delay);
4745 seq_printf(m, "Backlight on delay: %d\n",
4746 intel_dp->backlight_on_delay);
4747 seq_printf(m, "Backlight off delay: %d\n",
4748 intel_dp->backlight_off_delay);
4749
4750 return 0;
4751}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02004752DEFINE_SHOW_ATTRIBUTE(i915_panel);
David Weinehallecbd6782016-08-23 12:23:56 +03004753
/*
 * Report the HDCP version supported by the sink of a connected connector.
 * Returns -ENODEV if not connected, -EINVAL if the connector has no HDCP
 * support wired up.
 */
static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_connector *intel_connector = to_intel_connector(connector);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	/* HDCP is supported by the connector only if a shim is hooked up */
	if (!intel_connector->hdcp.shim)
		return -EINVAL;

	seq_printf(m, "%s:%d HDCP version: ", connector->name,
		   connector->base.id);
	seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
		   "None" : "HDCP1.4");
	seq_puts(m, "\n");

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
4775
/*
 * Report DSC and FEC capability/state for a connected DP connector.
 *
 * Both the connection_mutex and the crtc mutex are taken under a modeset
 * acquire context; on -EDEADLK the locks are dropped via
 * drm_modeset_backoff() and the whole sequence is retried, which is why the
 * body lives in a do/while (try_again) loop.
 */
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret = 0;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		try_again = false;
		/* Stabilize connector->state before dereferencing it. */
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			/* Deadlock: back off and retry; any other error is fatal. */
			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
				try_again = true;
				continue;
			}
			break;
		}
		crtc = connector->state->crtc;
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		/* crtc->state is only stable under the crtc's own mutex. */
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   yesno(crtc_state->dsc_params.compression_enable));
		seq_printf(m, "DSC_Sink_Support: %s\n",
			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		seq_printf(m, "Force_DSC_Enable: %s\n",
			   yesno(intel_dp->force_dsc_en));
		/* FEC is a DP-over-cable concept; not meaningful for eDP. */
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
4834
4835static ssize_t i915_dsc_fec_support_write(struct file *file,
4836 const char __user *ubuf,
4837 size_t len, loff_t *offp)
4838{
4839 bool dsc_enable = false;
4840 int ret;
4841 struct drm_connector *connector =
4842 ((struct seq_file *)file->private_data)->private;
4843 struct intel_encoder *encoder = intel_attached_encoder(connector);
4844 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4845
4846 if (len == 0)
4847 return 0;
4848
4849 DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
4850 len);
4851
4852 ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
4853 if (ret < 0)
4854 return ret;
4855
4856 DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
4857 (dsc_enable) ? "true" : "false");
4858 intel_dp->force_dsc_en = dsc_enable;
4859
4860 *offp += len;
4861 return len;
4862}
4863
4864static int i915_dsc_fec_support_open(struct inode *inode,
4865 struct file *file)
4866{
4867 return single_open(file, i915_dsc_fec_support_show,
4868 inode->i_private);
4869}
4870
/*
 * fops for i915_dsc_fec_support: seq_file-backed reads plus a custom write
 * hook that lets userspace force-enable DSC.
 */
static const struct file_operations i915_dsc_fec_support_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_fec_support_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_fec_support_write
};
4879
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004880/**
4881 * i915_debugfs_connector_add - add i915 specific connector debugfs files
4882 * @connector: pointer to a registered drm_connector
4883 *
4884 * Cleanup will be done by drm_connector_unregister() through a call to
4885 * drm_debugfs_connector_remove().
4886 *
4887 * Returns 0 on success, negative error codes on error.
4888 */
4889int i915_debugfs_connector_add(struct drm_connector *connector)
4890{
4891 struct dentry *root = connector->debugfs_entry;
Manasi Navaree845f092018-12-05 16:54:07 -08004892 struct drm_i915_private *dev_priv = to_i915(connector->dev);
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004893
4894 /* The connector must have been registered beforehands. */
4895 if (!root)
4896 return -ENODEV;
4897
4898 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4899 connector->connector_type == DRM_MODE_CONNECTOR_eDP)
David Weinehallecbd6782016-08-23 12:23:56 +03004900 debugfs_create_file("i915_dpcd", S_IRUGO, root,
4901 connector, &i915_dpcd_fops);
4902
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07004903 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
David Weinehallecbd6782016-08-23 12:23:56 +03004904 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4905 connector, &i915_panel_fops);
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07004906 debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
4907 connector, &i915_psr_sink_status_fops);
4908 }
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004909
Ramalingam Cbdc93fe2018-10-23 14:52:29 +05304910 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4911 connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
4912 connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
4913 debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
4914 connector, &i915_hdcp_sink_capability_fops);
4915 }
4916
Manasi Navaree845f092018-12-05 16:54:07 -08004917 if (INTEL_GEN(dev_priv) >= 10 &&
4918 (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4919 connector->connector_type == DRM_MODE_CONNECTOR_eDP))
4920 debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
4921 connector, &i915_dsc_fec_support_fops);
4922
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004923 return 0;
4924}