blob: 37175414ce892a50d49846326efd5e29c836ff3b [file] [log] [blame]
Ben Gamari20172632009-02-17 20:08:50 -05001/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 *
27 */
28
Chris Wilsone637d2c2017-03-16 13:19:57 +000029#include <linux/sort.h>
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +010030#include <linux/sched/mm.h>
Daniel Vetterfcd70cd2019-01-17 22:03:34 +010031#include <drm/drm_debugfs.h>
32#include <drm/drm_fourcc.h>
Simon Farnsworth4e5359c2010-09-01 17:47:52 +010033#include "intel_drv.h"
Sagar Arun Kamblea2695742017-11-16 19:02:41 +053034#include "intel_guc_submission.h"
Ben Gamari20172632009-02-17 20:08:50 -050035
Chris Wilson9f588922019-01-16 15:33:04 +000036#include "i915_reset.h"
37
/* Map a debugfs info node back to the i915 device instance that owns it. */
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}
42
/*
 * i915_capabilities - debugfs dump of static device capabilities.
 *
 * Prints the GEN number, platform name and PCH type, then the device-info
 * flag and runtime tables, the driver caps, and finally the current module
 * parameters.  The params are dumped under kernel_param_lock() so the
 * snapshot is consistent against concurrent sysfs writes.
 */
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

	intel_device_info_dump_flags(info, &p);
	intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
	intel_driver_caps_print(&dev_priv->caps, &p);

	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915_modparams, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}
Ben Gamari433e12f2009-02-17 20:08:51 -050063
/* Status flag: '*' when the object is still active on the GPU, else ' '. */
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_is_active(obj))
		return '*';

	return ' ';
}
68
Imre Deaka7363de2016-05-12 16:18:52 +030069static char get_pin_flag(struct drm_i915_gem_object *obj)
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010070{
Chris Wilsonbd3d2252017-10-13 21:26:14 +010071 return obj->pin_global ? 'p' : ' ';
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010072}
73
Imre Deaka7363de2016-05-12 16:18:52 +030074static char get_tiling_flag(struct drm_i915_gem_object *obj)
Chris Wilsona6172a82009-02-11 14:26:38 +000075{
Chris Wilson3e510a82016-08-05 10:14:23 +010076 switch (i915_gem_object_get_tiling(obj)) {
Akshay Joshi0206e352011-08-16 15:34:10 -040077 default:
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010078 case I915_TILING_NONE: return ' ';
79 case I915_TILING_X: return 'X';
80 case I915_TILING_Y: return 'Y';
Akshay Joshi0206e352011-08-16 15:34:10 -040081 }
Chris Wilsona6172a82009-02-11 14:26:38 +000082}
83
Imre Deaka7363de2016-05-12 16:18:52 +030084static char get_global_flag(struct drm_i915_gem_object *obj)
Ben Widawsky1d693bc2013-07-31 17:00:00 -070085{
Chris Wilsona65adaf2017-10-09 09:43:57 +010086 return obj->userfault_count ? 'g' : ' ';
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010087}
88
Imre Deaka7363de2016-05-12 16:18:52 +030089static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010090{
Chris Wilsona4f5ea62016-10-28 13:58:35 +010091 return obj->mm.mapping ? 'M' : ' ';
Ben Widawsky1d693bc2013-07-31 17:00:00 -070092}
93
Tvrtko Ursulinca1543b2015-07-01 11:51:10 +010094static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
95{
96 u64 size = 0;
97 struct i915_vma *vma;
98
Chris Wilsone2189dd2017-12-07 21:14:07 +000099 for_each_ggtt_vma(vma, obj) {
100 if (drm_mm_node_allocated(&vma->node))
Tvrtko Ursulinca1543b2015-07-01 11:51:10 +0100101 size += vma->node.size;
102 }
103
104 return size;
105}
106
Matthew Auld7393b7e2017-10-06 23:18:28 +0100107static const char *
108stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
109{
110 size_t x = 0;
111
112 switch (page_sizes) {
113 case 0:
114 return "";
115 case I915_GTT_PAGE_SIZE_4K:
116 return "4K";
117 case I915_GTT_PAGE_SIZE_64K:
118 return "64K";
119 case I915_GTT_PAGE_SIZE_2M:
120 return "2M";
121 default:
122 if (!buf)
123 return "M";
124
125 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
126 x += snprintf(buf + x, len - x, "2M, ");
127 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
128 x += snprintf(buf + x, len - x, "64K, ");
129 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
130 x += snprintf(buf + x, len - x, "4K, ");
131 buf[x-2] = '\0';
132
133 return buf;
134 }
135}
136
/*
 * describe_obj - emit a one-line summary of a GEM object
 * @m: seq_file to print into
 * @obj: object to describe
 *
 * Prints the object's status flags, size in KiB, read/write domains,
 * cache level, dirty/purgeable state, then per-VMA binding details
 * (offset, size, page sizes, GGTT view type, fence), stolen placement,
 * last-write engine and frontbuffer bits.  The VMA list is walked
 * without its own lock, hence the struct_mutex assertion.
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	/* First pass: count pinned VMAs across every address space. */
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_global)
		seq_printf(m, " (global)");
	/* Second pass: describe each VMA that actually holds GTT space. */
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			/* Only GGTT bindings carry a view type. */
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				/* offset/size are stored in pages. */
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			/* '*' marks a fence with a pending request. */
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_active_request_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
224
Chris Wilsone637d2c2017-03-16 13:19:57 +0000225static int obj_rank_by_stolen(const void *A, const void *B)
Chris Wilson6d2b88852013-08-07 18:30:54 +0100226{
Chris Wilsone637d2c2017-03-16 13:19:57 +0000227 const struct drm_i915_gem_object *a =
228 *(const struct drm_i915_gem_object **)A;
229 const struct drm_i915_gem_object *b =
230 *(const struct drm_i915_gem_object **)B;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100231
Rasmus Villemoes2d05fa12015-09-28 23:08:50 +0200232 if (a->stolen->start < b->stolen->start)
233 return -1;
234 if (a->stolen->start > b->stolen->start)
235 return 1;
236 return 0;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100237}
238
/*
 * i915_gem_stolen_list_info - debugfs listing of objects in stolen memory
 *
 * Snapshots up to mm.object_count object pointers from the bound and
 * unbound lists (under mm.obj_lock), keeps only those backed by stolen
 * memory, sorts them by stolen offset and describes each one.  The
 * capacity is sampled with READ_ONCE() before allocating, so objects
 * created concurrently are simply truncated from the listing rather
 * than overflowing the array.
 */
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);

	}
	/* Unbound objects have no GTT footprint to add. */
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, " ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}
301
/* Accumulator for per-client / per-context GEM object statistics. */
struct file_stats {
	struct i915_address_space *vm;	/* ppGTT VMAs in other vms are skipped */
	unsigned long count;		/* number of objects seen */
	u64 total, unbound;		/* total bytes / bytes with no binding */
	u64 global, shared;		/* bytes bound in GGTT / named or exported */
	u64 active, inactive;		/* bytes in active vs idle VMAs */
	u64 closed;			/* bytes in closed VMAs */
};
310
/*
 * per_file_stats - idr/list callback accumulating object statistics
 * @id: idr handle (unused)
 * @ptr: the GEM object being visited
 * @data: the struct file_stats accumulator
 *
 * Adds @ptr's size to the relevant buckets of @data.  GGTT bindings are
 * always counted; ppGTT bindings are counted only when they belong to
 * stats->vm, so a context's dump reflects just its own address space.
 * Always returns 0 so idr_for_each() visits every object.
 */
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			/* Filter out bindings in foreign ppGTTs. */
			if (vma->vm != stats->vm)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;

		if (i915_vma_is_closed(vma))
			stats->closed += vma->node.size;
	}

	return 0;
}
348
/*
 * Emit one summary line for a stats bucket, prefixed with @name.
 * Buckets that counted no objects are skipped entirely.
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound, \
			   stats.closed); \
} while (0)
Brad Volkin493018d2014-12-11 12:13:08 -0800362
/*
 * print_batch_pool_stats - summarise kernel batch-pool object usage
 *
 * Feeds every object in every engine's batch-pool size buckets through
 * per_file_stats() (id 0: not reached via an idr), then prints a single
 * "[k]batch pool" summary line.
 */
static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	struct file_stats stats = {};
	enum intel_engine_id id;
	int j;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}
383
/*
 * print_context_stats - summarise GEM usage per context
 *
 * For every context: accounts the kernel-owned context state and ring
 * buffers into a "[k]contexts" bucket, and, for contexts that still
 * have an open file, walks the owner's object idr (under table_lock)
 * to print a per-client line tagged "comm/user_handle".  The task
 * lookup is done under rcu_read_lock() since the pid may be released
 * concurrently.
 */
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *i915)
{
	struct file_stats kstats = {};
	struct i915_gem_context *ctx;

	list_for_each_entry(ctx, &i915->contexts.list, link) {
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		for_each_engine(engine, i915, id) {
			struct intel_context *ce = to_intel_context(ctx, engine);

			if (ce->state)
				per_file_stats(0, ce->state->obj, &kstats);
			if (ce->ring)
				per_file_stats(0, ce->ring->vma->obj, &kstats);
		}

		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
			/* Restrict the walk to this context's ppGTT. */
			struct file_stats stats = { .vm = &ctx->ppgtt->vm, };
			struct drm_file *file = ctx->file_priv->file;
			struct task_struct *task;
			char name[80];

			spin_lock(&file->table_lock);
			idr_for_each(&file->object_idr, per_file_stats, &stats);
			spin_unlock(&file->table_lock);

			rcu_read_lock();
			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
			snprintf(name, sizeof(name), "%s/%d",
				 task ? task->comm : "<unknown>",
				 ctx->user_handle);
			rcu_read_unlock();

			print_file_stats(m, name, stats);
		}
	}

	print_file_stats(m, "[k]contexts", kstats);
}
426
/*
 * i915_gem_object_info - debugfs overview of GEM memory usage
 *
 * Prints aggregate object counts and sizes split into unbound, bound,
 * purgeable, CPU-mapped, huge-page and display-pinned buckets, plus the
 * GGTT total and supported page sizes, then per-batch-pool and
 * per-context breakdowns.  The list walks are under mm.obj_lock; only
 * the trailing breakdowns take struct_mutex (interruptibly).
 */
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
	struct drm_i915_gem_object *obj;
	unsigned int page_sizes = 0;
	char buf[80];
	int ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	huge_size = huge_count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		/* Backing store uses pages larger than the 4K minimum. */
		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	/* Reuse size/count for the bound list; keep the other buckets running. */
	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->pin_global) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
		   huge_count,
		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
		   huge_size);
	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%pa] gtt total\n",
		   ggtt->vm.total, &ggtt->mappable_end);
	seq_printf(m, "Supported page sizes: %s\n",
		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
					buf, sizeof(buf)));

	seq_putc(m, '\n');

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	print_batch_pool_stats(m, dev_priv);
	print_context_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
530
Damien Lespiauaee56cf2013-06-24 22:59:49 +0100531static int i915_gem_gtt_info(struct seq_file *m, void *data)
Chris Wilson08c18322011-01-10 00:00:24 +0000532{
Damien Lespiau9f25d002014-05-13 15:30:28 +0100533 struct drm_info_node *node = m->private;
David Weinehall36cdd012016-08-22 13:59:31 +0300534 struct drm_i915_private *dev_priv = node_to_i915(node);
535 struct drm_device *dev = &dev_priv->drm;
Chris Wilsonf2123812017-10-16 12:40:37 +0100536 struct drm_i915_gem_object **objects;
Chris Wilson08c18322011-01-10 00:00:24 +0000537 struct drm_i915_gem_object *obj;
Mika Kuoppalac44ef602015-06-25 18:35:05 +0300538 u64 total_obj_size, total_gtt_size;
Chris Wilsonf2123812017-10-16 12:40:37 +0100539 unsigned long nobject, n;
Chris Wilson08c18322011-01-10 00:00:24 +0000540 int count, ret;
541
Chris Wilsonf2123812017-10-16 12:40:37 +0100542 nobject = READ_ONCE(dev_priv->mm.object_count);
543 objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
544 if (!objects)
545 return -ENOMEM;
546
Chris Wilson08c18322011-01-10 00:00:24 +0000547 ret = mutex_lock_interruptible(&dev->struct_mutex);
548 if (ret)
549 return ret;
550
Chris Wilsonf2123812017-10-16 12:40:37 +0100551 count = 0;
552 spin_lock(&dev_priv->mm.obj_lock);
553 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
554 objects[count++] = obj;
555 if (count == nobject)
556 break;
557 }
558 spin_unlock(&dev_priv->mm.obj_lock);
559
560 total_obj_size = total_gtt_size = 0;
561 for (n = 0; n < count; n++) {
562 obj = objects[n];
563
Damien Lespiau267f0c92013-06-24 22:59:48 +0100564 seq_puts(m, " ");
Chris Wilson08c18322011-01-10 00:00:24 +0000565 describe_obj(m, obj);
Damien Lespiau267f0c92013-06-24 22:59:48 +0100566 seq_putc(m, '\n');
Chris Wilson08c18322011-01-10 00:00:24 +0000567 total_obj_size += obj->base.size;
Tvrtko Ursulinca1543b2015-07-01 11:51:10 +0100568 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
Chris Wilson08c18322011-01-10 00:00:24 +0000569 }
570
571 mutex_unlock(&dev->struct_mutex);
572
Mika Kuoppalac44ef602015-06-25 18:35:05 +0300573 seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
Chris Wilson08c18322011-01-10 00:00:24 +0000574 count, total_obj_size, total_gtt_size);
Chris Wilsonf2123812017-10-16 12:40:37 +0100575 kvfree(objects);
Chris Wilson08c18322011-01-10 00:00:24 +0000576
577 return 0;
578}
579
/*
 * i915_gem_batch_pool_info - debugfs listing of the batch-buffer pools
 *
 * Under struct_mutex (taken interruptibly), walks each engine's
 * batch-pool size buckets twice: once to print the per-bucket object
 * count, then again to describe every object, ending with a grand total.
 */
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, " ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
624
/*
 * gen8_display_interrupt_info - dump gen8+ display interrupt registers
 *
 * For each pipe, prints the DE_PIPE IMR/IIR/IER registers, skipping
 * pipes whose power domain cannot be acquired; the conditional power
 * reference is dropped as soon as that pipe's registers are read.
 * Then dumps the DE port, DE misc and PCU interrupt registers.
 */
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		wakeref = intel_display_power_get_if_enabled(dev_priv,
							     power_domain);
		if (!wakeref) {
			/* Power well is down: note it and move on. */
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain, wakeref);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}
676
Ben Gamari20172632009-02-17 20:08:50 -0500677static int i915_interrupt_info(struct seq_file *m, void *data)
678{
David Weinehall36cdd012016-08-22 13:59:31 +0300679 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000680 struct intel_engine_cs *engine;
Akash Goel3b3f1652016-10-13 22:44:48 +0530681 enum intel_engine_id id;
Chris Wilsona0371212019-01-14 14:21:14 +0000682 intel_wakeref_t wakeref;
Chris Wilson4bb05042016-09-03 07:53:43 +0100683 int i, pipe;
Chris Wilsonde227ef2010-07-03 07:58:38 +0100684
Chris Wilsona0371212019-01-14 14:21:14 +0000685 wakeref = intel_runtime_pm_get(dev_priv);
Ben Gamari20172632009-02-17 20:08:50 -0500686
David Weinehall36cdd012016-08-22 13:59:31 +0300687 if (IS_CHERRYVIEW(dev_priv)) {
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000688 intel_wakeref_t pref;
689
Ville Syrjälä74e1ca82014-04-09 13:28:09 +0300690 seq_printf(m, "Master Interrupt Control:\t%08x\n",
691 I915_READ(GEN8_MASTER_IRQ));
692
693 seq_printf(m, "Display IER:\t%08x\n",
694 I915_READ(VLV_IER));
695 seq_printf(m, "Display IIR:\t%08x\n",
696 I915_READ(VLV_IIR));
697 seq_printf(m, "Display IIR_RW:\t%08x\n",
698 I915_READ(VLV_IIR_RW));
699 seq_printf(m, "Display IMR:\t%08x\n",
700 I915_READ(VLV_IMR));
Chris Wilson9c870d02016-10-24 13:42:15 +0100701 for_each_pipe(dev_priv, pipe) {
702 enum intel_display_power_domain power_domain;
703
704 power_domain = POWER_DOMAIN_PIPE(pipe);
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000705 pref = intel_display_power_get_if_enabled(dev_priv,
706 power_domain);
707 if (!pref) {
Chris Wilson9c870d02016-10-24 13:42:15 +0100708 seq_printf(m, "Pipe %c power disabled\n",
709 pipe_name(pipe));
710 continue;
711 }
712
Ville Syrjälä74e1ca82014-04-09 13:28:09 +0300713 seq_printf(m, "Pipe %c stat:\t%08x\n",
714 pipe_name(pipe),
715 I915_READ(PIPESTAT(pipe)));
716
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000717 intel_display_power_put(dev_priv, power_domain, pref);
Chris Wilson9c870d02016-10-24 13:42:15 +0100718 }
719
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000720 pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
Ville Syrjälä74e1ca82014-04-09 13:28:09 +0300721 seq_printf(m, "Port hotplug:\t%08x\n",
722 I915_READ(PORT_HOTPLUG_EN));
723 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
724 I915_READ(VLV_DPFLIPSTAT));
725 seq_printf(m, "DPINVGTT:\t%08x\n",
726 I915_READ(DPINVGTT));
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000727 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
Ville Syrjälä74e1ca82014-04-09 13:28:09 +0300728
729 for (i = 0; i < 4; i++) {
730 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
731 i, I915_READ(GEN8_GT_IMR(i)));
732 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
733 i, I915_READ(GEN8_GT_IIR(i)));
734 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
735 i, I915_READ(GEN8_GT_IER(i)));
736 }
737
738 seq_printf(m, "PCU interrupt mask:\t%08x\n",
739 I915_READ(GEN8_PCU_IMR));
740 seq_printf(m, "PCU interrupt identity:\t%08x\n",
741 I915_READ(GEN8_PCU_IIR));
742 seq_printf(m, "PCU interrupt enable:\t%08x\n",
743 I915_READ(GEN8_PCU_IER));
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200744 } else if (INTEL_GEN(dev_priv) >= 11) {
745 seq_printf(m, "Master Interrupt Control: %08x\n",
746 I915_READ(GEN11_GFX_MSTR_IRQ));
747
748 seq_printf(m, "Render/Copy Intr Enable: %08x\n",
749 I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
750 seq_printf(m, "VCS/VECS Intr Enable: %08x\n",
751 I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
752 seq_printf(m, "GUC/SG Intr Enable:\t %08x\n",
753 I915_READ(GEN11_GUC_SG_INTR_ENABLE));
754 seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
755 I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
756 seq_printf(m, "Crypto Intr Enable:\t %08x\n",
757 I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
758 seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n",
759 I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
760
761 seq_printf(m, "Display Interrupt Control:\t%08x\n",
762 I915_READ(GEN11_DISPLAY_INT_CTL));
763
764 gen8_display_interrupt_info(m);
David Weinehall36cdd012016-08-22 13:59:31 +0300765 } else if (INTEL_GEN(dev_priv) >= 8) {
Ben Widawskya123f152013-11-02 21:07:10 -0700766 seq_printf(m, "Master Interrupt Control:\t%08x\n",
767 I915_READ(GEN8_MASTER_IRQ));
768
769 for (i = 0; i < 4; i++) {
770 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
771 i, I915_READ(GEN8_GT_IMR(i)));
772 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
773 i, I915_READ(GEN8_GT_IIR(i)));
774 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
775 i, I915_READ(GEN8_GT_IER(i)));
776 }
777
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200778 gen8_display_interrupt_info(m);
David Weinehall36cdd012016-08-22 13:59:31 +0300779 } else if (IS_VALLEYVIEW(dev_priv)) {
Jesse Barnes7e231dbe2012-03-28 13:39:38 -0700780 seq_printf(m, "Display IER:\t%08x\n",
781 I915_READ(VLV_IER));
782 seq_printf(m, "Display IIR:\t%08x\n",
783 I915_READ(VLV_IIR));
784 seq_printf(m, "Display IIR_RW:\t%08x\n",
785 I915_READ(VLV_IIR_RW));
786 seq_printf(m, "Display IMR:\t%08x\n",
787 I915_READ(VLV_IMR));
Chris Wilson4f4631a2017-02-10 13:36:32 +0000788 for_each_pipe(dev_priv, pipe) {
789 enum intel_display_power_domain power_domain;
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000790 intel_wakeref_t pref;
Chris Wilson4f4631a2017-02-10 13:36:32 +0000791
792 power_domain = POWER_DOMAIN_PIPE(pipe);
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000793 pref = intel_display_power_get_if_enabled(dev_priv,
794 power_domain);
795 if (!pref) {
Chris Wilson4f4631a2017-02-10 13:36:32 +0000796 seq_printf(m, "Pipe %c power disabled\n",
797 pipe_name(pipe));
798 continue;
799 }
800
Jesse Barnes7e231dbe2012-03-28 13:39:38 -0700801 seq_printf(m, "Pipe %c stat:\t%08x\n",
802 pipe_name(pipe),
803 I915_READ(PIPESTAT(pipe)));
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000804 intel_display_power_put(dev_priv, power_domain, pref);
Chris Wilson4f4631a2017-02-10 13:36:32 +0000805 }
Jesse Barnes7e231dbe2012-03-28 13:39:38 -0700806
807 seq_printf(m, "Master IER:\t%08x\n",
808 I915_READ(VLV_MASTER_IER));
809
810 seq_printf(m, "Render IER:\t%08x\n",
811 I915_READ(GTIER));
812 seq_printf(m, "Render IIR:\t%08x\n",
813 I915_READ(GTIIR));
814 seq_printf(m, "Render IMR:\t%08x\n",
815 I915_READ(GTIMR));
816
817 seq_printf(m, "PM IER:\t\t%08x\n",
818 I915_READ(GEN6_PMIER));
819 seq_printf(m, "PM IIR:\t\t%08x\n",
820 I915_READ(GEN6_PMIIR));
821 seq_printf(m, "PM IMR:\t\t%08x\n",
822 I915_READ(GEN6_PMIMR));
823
824 seq_printf(m, "Port hotplug:\t%08x\n",
825 I915_READ(PORT_HOTPLUG_EN));
826 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
827 I915_READ(VLV_DPFLIPSTAT));
828 seq_printf(m, "DPINVGTT:\t%08x\n",
829 I915_READ(DPINVGTT));
830
David Weinehall36cdd012016-08-22 13:59:31 +0300831 } else if (!HAS_PCH_SPLIT(dev_priv)) {
Zhenyu Wang5f6a1692009-08-10 21:37:24 +0800832 seq_printf(m, "Interrupt enable: %08x\n",
833 I915_READ(IER));
834 seq_printf(m, "Interrupt identity: %08x\n",
835 I915_READ(IIR));
836 seq_printf(m, "Interrupt mask: %08x\n",
837 I915_READ(IMR));
Damien Lespiau055e3932014-08-18 13:49:10 +0100838 for_each_pipe(dev_priv, pipe)
Jesse Barnes9db4a9c2011-02-07 12:26:52 -0800839 seq_printf(m, "Pipe %c stat: %08x\n",
840 pipe_name(pipe),
841 I915_READ(PIPESTAT(pipe)));
Zhenyu Wang5f6a1692009-08-10 21:37:24 +0800842 } else {
843 seq_printf(m, "North Display Interrupt enable: %08x\n",
844 I915_READ(DEIER));
845 seq_printf(m, "North Display Interrupt identity: %08x\n",
846 I915_READ(DEIIR));
847 seq_printf(m, "North Display Interrupt mask: %08x\n",
848 I915_READ(DEIMR));
849 seq_printf(m, "South Display Interrupt enable: %08x\n",
850 I915_READ(SDEIER));
851 seq_printf(m, "South Display Interrupt identity: %08x\n",
852 I915_READ(SDEIIR));
853 seq_printf(m, "South Display Interrupt mask: %08x\n",
854 I915_READ(SDEIMR));
855 seq_printf(m, "Graphics Interrupt enable: %08x\n",
856 I915_READ(GTIER));
857 seq_printf(m, "Graphics Interrupt identity: %08x\n",
858 I915_READ(GTIIR));
859 seq_printf(m, "Graphics Interrupt mask: %08x\n",
860 I915_READ(GTIMR));
861 }
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200862
863 if (INTEL_GEN(dev_priv) >= 11) {
864 seq_printf(m, "RCS Intr Mask:\t %08x\n",
865 I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
866 seq_printf(m, "BCS Intr Mask:\t %08x\n",
867 I915_READ(GEN11_BCS_RSVD_INTR_MASK));
868 seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
869 I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
870 seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
871 I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
872 seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
873 I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
874 seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
875 I915_READ(GEN11_GUC_SG_INTR_MASK));
876 seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
877 I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
878 seq_printf(m, "Crypto Intr Mask:\t %08x\n",
879 I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
880 seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
881 I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
882
883 } else if (INTEL_GEN(dev_priv) >= 6) {
Chris Wilsond5acadf2017-12-09 10:44:18 +0000884 for_each_engine(engine, dev_priv, id) {
Chris Wilsona2c7f6f2012-09-01 20:51:22 +0100885 seq_printf(m,
886 "Graphics Interrupt mask (%s): %08x\n",
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000887 engine->name, I915_READ_IMR(engine));
Chris Wilson9862e602011-01-04 22:22:17 +0000888 }
Chris Wilson9862e602011-01-04 22:22:17 +0000889 }
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200890
Chris Wilsona0371212019-01-14 14:21:14 +0000891 intel_runtime_pm_put(dev_priv, wakeref);
Chris Wilsonde227ef2010-07-03 07:58:38 +0100892
Ben Gamari20172632009-02-17 20:08:50 -0500893 return 0;
894}
895
Chris Wilsona6172a82009-02-11 14:26:38 +0000896static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
897{
David Weinehall36cdd012016-08-22 13:59:31 +0300898 struct drm_i915_private *dev_priv = node_to_i915(m->private);
899 struct drm_device *dev = &dev_priv->drm;
Chris Wilsonde227ef2010-07-03 07:58:38 +0100900 int i, ret;
901
902 ret = mutex_lock_interruptible(&dev->struct_mutex);
903 if (ret)
904 return ret;
Chris Wilsona6172a82009-02-11 14:26:38 +0000905
Chris Wilsona6172a82009-02-11 14:26:38 +0000906 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
907 for (i = 0; i < dev_priv->num_fence_regs; i++) {
Chris Wilson49ef5292016-08-18 17:17:00 +0100908 struct i915_vma *vma = dev_priv->fence_regs[i].vma;
Chris Wilsona6172a82009-02-11 14:26:38 +0000909
Chris Wilson6c085a72012-08-20 11:40:46 +0200910 seq_printf(m, "Fence %d, pin count = %d, object = ",
911 i, dev_priv->fence_regs[i].pin_count);
Chris Wilson49ef5292016-08-18 17:17:00 +0100912 if (!vma)
Damien Lespiau267f0c92013-06-24 22:59:48 +0100913 seq_puts(m, "unused");
Chris Wilsonc2c347a92010-10-27 15:11:53 +0100914 else
Chris Wilson49ef5292016-08-18 17:17:00 +0100915 describe_obj(m, vma->obj);
Damien Lespiau267f0c92013-06-24 22:59:48 +0100916 seq_putc(m, '\n');
Chris Wilsona6172a82009-02-11 14:26:38 +0000917 }
918
Chris Wilson05394f32010-11-08 19:18:58 +0000919 mutex_unlock(&dev->struct_mutex);
Chris Wilsona6172a82009-02-11 14:26:38 +0000920 return 0;
921}
922
Chris Wilson98a2f412016-10-12 10:05:18 +0100923#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000924static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
925 size_t count, loff_t *pos)
926{
Chris Wilson0e390372018-11-23 13:23:25 +0000927 struct i915_gpu_state *error;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000928 ssize_t ret;
Chris Wilson0e390372018-11-23 13:23:25 +0000929 void *buf;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000930
Chris Wilson0e390372018-11-23 13:23:25 +0000931 error = file->private_data;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000932 if (!error)
933 return 0;
934
Chris Wilson0e390372018-11-23 13:23:25 +0000935 /* Bounce buffer required because of kernfs __user API convenience. */
936 buf = kmalloc(count, GFP_KERNEL);
937 if (!buf)
938 return -ENOMEM;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000939
Chris Wilson0e390372018-11-23 13:23:25 +0000940 ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
941 if (ret <= 0)
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000942 goto out;
943
Chris Wilson0e390372018-11-23 13:23:25 +0000944 if (!copy_to_user(ubuf, buf, ret))
945 *pos += ret;
946 else
947 ret = -EFAULT;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000948
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000949out:
Chris Wilson0e390372018-11-23 13:23:25 +0000950 kfree(buf);
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000951 return ret;
952}
953
954static int gpu_state_release(struct inode *inode, struct file *file)
955{
956 i915_gpu_state_put(file->private_data);
957 return 0;
958}
959
960static int i915_gpu_info_open(struct inode *inode, struct file *file)
961{
Chris Wilson090e5fe2017-03-28 14:14:07 +0100962 struct drm_i915_private *i915 = inode->i_private;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000963 struct i915_gpu_state *gpu;
Chris Wilsona0371212019-01-14 14:21:14 +0000964 intel_wakeref_t wakeref;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000965
Chris Wilsond4225a52019-01-14 14:21:23 +0000966 gpu = NULL;
967 with_intel_runtime_pm(i915, wakeref)
968 gpu = i915_capture_gpu_state(i915);
Chris Wilsone6154e42018-12-07 11:05:54 +0000969 if (IS_ERR(gpu))
970 return PTR_ERR(gpu);
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000971
972 file->private_data = gpu;
973 return 0;
974}
975
976static const struct file_operations i915_gpu_info_fops = {
977 .owner = THIS_MODULE,
978 .open = i915_gpu_info_open,
979 .read = gpu_state_read,
980 .llseek = default_llseek,
981 .release = gpu_state_release,
982};
Chris Wilson98a2f412016-10-12 10:05:18 +0100983
Daniel Vetterd5442302012-04-27 15:17:40 +0200984static ssize_t
985i915_error_state_write(struct file *filp,
986 const char __user *ubuf,
987 size_t cnt,
988 loff_t *ppos)
989{
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000990 struct i915_gpu_state *error = filp->private_data;
991
992 if (!error)
993 return 0;
Daniel Vetterd5442302012-04-27 15:17:40 +0200994
995 DRM_DEBUG_DRIVER("Resetting error state\n");
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000996 i915_reset_error_state(error->i915);
Daniel Vetterd5442302012-04-27 15:17:40 +0200997
998 return cnt;
999}
1000
1001static int i915_error_state_open(struct inode *inode, struct file *file)
1002{
Chris Wilsone6154e42018-12-07 11:05:54 +00001003 struct i915_gpu_state *error;
1004
1005 error = i915_first_error_state(inode->i_private);
1006 if (IS_ERR(error))
1007 return PTR_ERR(error);
1008
1009 file->private_data = error;
Mika Kuoppalaedc3d882013-05-23 13:55:35 +03001010 return 0;
Daniel Vetterd5442302012-04-27 15:17:40 +02001011}
1012
Daniel Vetterd5442302012-04-27 15:17:40 +02001013static const struct file_operations i915_error_state_fops = {
1014 .owner = THIS_MODULE,
1015 .open = i915_error_state_open,
Chris Wilson5a4c6f12017-02-14 16:46:11 +00001016 .read = gpu_state_read,
Daniel Vetterd5442302012-04-27 15:17:40 +02001017 .write = i915_error_state_write,
1018 .llseek = default_llseek,
Chris Wilson5a4c6f12017-02-14 16:46:11 +00001019 .release = gpu_state_release,
Daniel Vetterd5442302012-04-27 15:17:40 +02001020};
Chris Wilson98a2f412016-10-12 10:05:18 +01001021#endif
1022
Deepak Sadb4bd12014-03-31 11:30:02 +05301023static int i915_frequency_info(struct seq_file *m, void *unused)
Jesse Barnesf97108d2010-01-29 11:27:07 -08001024{
David Weinehall36cdd012016-08-22 13:59:31 +03001025 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001026 struct intel_rps *rps = &dev_priv->gt_pm.rps;
Chris Wilsona0371212019-01-14 14:21:14 +00001027 intel_wakeref_t wakeref;
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02001028 int ret = 0;
1029
Chris Wilsona0371212019-01-14 14:21:14 +00001030 wakeref = intel_runtime_pm_get(dev_priv);
Jesse Barnesf97108d2010-01-29 11:27:07 -08001031
Lucas De Marchicf819ef2018-12-12 10:10:43 -08001032 if (IS_GEN(dev_priv, 5)) {
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001033 u16 rgvswctl = I915_READ16(MEMSWCTL);
1034 u16 rgvstat = I915_READ16(MEMSTAT_ILK);
1035
1036 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
1037 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
1038 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
1039 MEMSTAT_VID_SHIFT);
1040 seq_printf(m, "Current P-state: %d\n",
1041 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
David Weinehall36cdd012016-08-22 13:59:31 +03001042 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
Sagar Arun Kamble0d6fc922017-10-10 22:30:02 +01001043 u32 rpmodectl, freq_sts;
Wayne Boyer666a4532015-12-09 12:29:35 -08001044
Sagar Arun Kamble9f817502017-10-10 22:30:05 +01001045 mutex_lock(&dev_priv->pcu_lock);
Sagar Arun Kamble0d6fc922017-10-10 22:30:02 +01001046
1047 rpmodectl = I915_READ(GEN6_RP_CONTROL);
1048 seq_printf(m, "Video Turbo Mode: %s\n",
1049 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1050 seq_printf(m, "HW control enabled: %s\n",
1051 yesno(rpmodectl & GEN6_RP_ENABLE));
1052 seq_printf(m, "SW control enabled: %s\n",
1053 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1054 GEN6_RP_MEDIA_SW_MODE));
1055
Wayne Boyer666a4532015-12-09 12:29:35 -08001056 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
1057 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
1058 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
1059
1060 seq_printf(m, "actual GPU freq: %d MHz\n",
1061 intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
1062
1063 seq_printf(m, "current GPU freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001064 intel_gpu_freq(dev_priv, rps->cur_freq));
Wayne Boyer666a4532015-12-09 12:29:35 -08001065
1066 seq_printf(m, "max GPU freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001067 intel_gpu_freq(dev_priv, rps->max_freq));
Wayne Boyer666a4532015-12-09 12:29:35 -08001068
1069 seq_printf(m, "min GPU freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001070 intel_gpu_freq(dev_priv, rps->min_freq));
Wayne Boyer666a4532015-12-09 12:29:35 -08001071
1072 seq_printf(m, "idle GPU freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001073 intel_gpu_freq(dev_priv, rps->idle_freq));
Wayne Boyer666a4532015-12-09 12:29:35 -08001074
1075 seq_printf(m,
1076 "efficient (RPe) frequency: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001077 intel_gpu_freq(dev_priv, rps->efficient_freq));
Sagar Arun Kamble9f817502017-10-10 22:30:05 +01001078 mutex_unlock(&dev_priv->pcu_lock);
David Weinehall36cdd012016-08-22 13:59:31 +03001079 } else if (INTEL_GEN(dev_priv) >= 6) {
Bob Paauwe35040562015-06-25 14:54:07 -07001080 u32 rp_state_limits;
1081 u32 gt_perf_status;
1082 u32 rp_state_cap;
Chris Wilson0d8f9492014-03-27 09:06:14 +00001083 u32 rpmodectl, rpinclimit, rpdeclimit;
Chris Wilson8e8c06c2013-08-26 19:51:01 -03001084 u32 rpstat, cagf, reqf;
Jesse Barnesccab5c82011-01-18 15:49:25 -08001085 u32 rpupei, rpcurup, rpprevup;
1086 u32 rpdownei, rpcurdown, rpprevdown;
Paulo Zanoni9dd3c602014-08-01 18:14:48 -03001087 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001088 int max_freq;
1089
Bob Paauwe35040562015-06-25 14:54:07 -07001090 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02001091 if (IS_GEN9_LP(dev_priv)) {
Bob Paauwe35040562015-06-25 14:54:07 -07001092 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
1093 gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
1094 } else {
1095 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
1096 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
1097 }
1098
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001099 /* RPSTAT1 is in the GT power well */
Mika Kuoppala59bad942015-01-16 11:34:40 +02001100 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001101
Chris Wilson8e8c06c2013-08-26 19:51:01 -03001102 reqf = I915_READ(GEN6_RPNSWREQ);
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001103 if (INTEL_GEN(dev_priv) >= 9)
Akash Goel60260a52015-03-06 11:07:21 +05301104 reqf >>= 23;
1105 else {
1106 reqf &= ~GEN6_TURBO_DISABLE;
David Weinehall36cdd012016-08-22 13:59:31 +03001107 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
Akash Goel60260a52015-03-06 11:07:21 +05301108 reqf >>= 24;
1109 else
1110 reqf >>= 25;
1111 }
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001112 reqf = intel_gpu_freq(dev_priv, reqf);
Chris Wilson8e8c06c2013-08-26 19:51:01 -03001113
Chris Wilson0d8f9492014-03-27 09:06:14 +00001114 rpmodectl = I915_READ(GEN6_RP_CONTROL);
1115 rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
1116 rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
1117
Jesse Barnesccab5c82011-01-18 15:49:25 -08001118 rpstat = I915_READ(GEN6_RPSTAT1);
Akash Goeld6cda9c2016-04-23 00:05:46 +05301119 rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
1120 rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
1121 rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
1122 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
1123 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
1124 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
Tvrtko Ursulinc84b2702017-11-21 18:18:44 +00001125 cagf = intel_gpu_freq(dev_priv,
1126 intel_get_cagf(dev_priv, rpstat));
Jesse Barnesccab5c82011-01-18 15:49:25 -08001127
Mika Kuoppala59bad942015-01-16 11:34:40 +02001128 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
Ben Widawskyd1ebd8162011-04-25 20:11:50 +01001129
Oscar Mateo6b7a6a72018-05-10 14:59:55 -07001130 if (INTEL_GEN(dev_priv) >= 11) {
1131 pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
1132 pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
1133 /*
1134 * The equivalent to the PM ISR & IIR cannot be read
1135 * without affecting the current state of the system
1136 */
1137 pm_isr = 0;
1138 pm_iir = 0;
1139 } else if (INTEL_GEN(dev_priv) >= 8) {
Paulo Zanoni9dd3c602014-08-01 18:14:48 -03001140 pm_ier = I915_READ(GEN8_GT_IER(2));
1141 pm_imr = I915_READ(GEN8_GT_IMR(2));
1142 pm_isr = I915_READ(GEN8_GT_ISR(2));
1143 pm_iir = I915_READ(GEN8_GT_IIR(2));
Oscar Mateo6b7a6a72018-05-10 14:59:55 -07001144 } else {
1145 pm_ier = I915_READ(GEN6_PMIER);
1146 pm_imr = I915_READ(GEN6_PMIMR);
1147 pm_isr = I915_READ(GEN6_PMISR);
1148 pm_iir = I915_READ(GEN6_PMIIR);
Paulo Zanoni9dd3c602014-08-01 18:14:48 -03001149 }
Oscar Mateo6b7a6a72018-05-10 14:59:55 -07001150 pm_mask = I915_READ(GEN6_PMINTRMSK);
1151
Sagar Arun Kamble960e5462017-10-10 22:29:59 +01001152 seq_printf(m, "Video Turbo Mode: %s\n",
1153 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1154 seq_printf(m, "HW control enabled: %s\n",
1155 yesno(rpmodectl & GEN6_RP_ENABLE));
1156 seq_printf(m, "SW control enabled: %s\n",
1157 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1158 GEN6_RP_MEDIA_SW_MODE));
Oscar Mateo6b7a6a72018-05-10 14:59:55 -07001159
1160 seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
1161 pm_ier, pm_imr, pm_mask);
1162 if (INTEL_GEN(dev_priv) <= 10)
1163 seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
1164 pm_isr, pm_iir);
Sagar Arun Kamble5dd04552017-03-11 08:07:00 +05301165 seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001166 rps->pm_intrmsk_mbz);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001167 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001168 seq_printf(m, "Render p-state ratio: %d\n",
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001169 (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001170 seq_printf(m, "Render p-state VID: %d\n",
1171 gt_perf_status & 0xff);
1172 seq_printf(m, "Render p-state limit: %d\n",
1173 rp_state_limits & 0xff);
Chris Wilson0d8f9492014-03-27 09:06:14 +00001174 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
1175 seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
1176 seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
1177 seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
Chris Wilson8e8c06c2013-08-26 19:51:01 -03001178 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
Ben Widawskyf82855d2013-01-29 12:00:15 -08001179 seq_printf(m, "CAGF: %dMHz\n", cagf);
Akash Goeld6cda9c2016-04-23 00:05:46 +05301180 seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
1181 rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
1182 seq_printf(m, "RP CUR UP: %d (%dus)\n",
1183 rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
1184 seq_printf(m, "RP PREV UP: %d (%dus)\n",
1185 rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
Chris Wilson60548c52018-07-31 14:26:29 +01001186 seq_printf(m, "Up threshold: %d%%\n",
1187 rps->power.up_threshold);
Chris Wilsond86ed342015-04-27 13:41:19 +01001188
Akash Goeld6cda9c2016-04-23 00:05:46 +05301189 seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
1190 rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
1191 seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
1192 rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
1193 seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
1194 rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
Chris Wilson60548c52018-07-31 14:26:29 +01001195 seq_printf(m, "Down threshold: %d%%\n",
1196 rps->power.down_threshold);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001197
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02001198 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
Bob Paauwe35040562015-06-25 14:54:07 -07001199 rp_state_cap >> 16) & 0xff;
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001200 max_freq *= (IS_GEN9_BC(dev_priv) ||
Oscar Mateo2b2874e2018-04-05 17:00:52 +03001201 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001202 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001203 intel_gpu_freq(dev_priv, max_freq));
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001204
1205 max_freq = (rp_state_cap & 0xff00) >> 8;
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001206 max_freq *= (IS_GEN9_BC(dev_priv) ||
Oscar Mateo2b2874e2018-04-05 17:00:52 +03001207 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001208 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001209 intel_gpu_freq(dev_priv, max_freq));
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001210
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02001211 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
Bob Paauwe35040562015-06-25 14:54:07 -07001212 rp_state_cap >> 0) & 0xff;
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001213 max_freq *= (IS_GEN9_BC(dev_priv) ||
Oscar Mateo2b2874e2018-04-05 17:00:52 +03001214 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001215 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001216 intel_gpu_freq(dev_priv, max_freq));
Ben Widawsky31c77382013-04-05 14:29:22 -07001217 seq_printf(m, "Max overclocked frequency: %dMHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001218 intel_gpu_freq(dev_priv, rps->max_freq));
Chris Wilsonaed242f2015-03-18 09:48:21 +00001219
Chris Wilsond86ed342015-04-27 13:41:19 +01001220 seq_printf(m, "Current freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001221 intel_gpu_freq(dev_priv, rps->cur_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001222 seq_printf(m, "Actual freq: %d MHz\n", cagf);
Chris Wilsonaed242f2015-03-18 09:48:21 +00001223 seq_printf(m, "Idle freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001224 intel_gpu_freq(dev_priv, rps->idle_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001225 seq_printf(m, "Min freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001226 intel_gpu_freq(dev_priv, rps->min_freq));
Chris Wilson29ecd78d2016-07-13 09:10:35 +01001227 seq_printf(m, "Boost freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001228 intel_gpu_freq(dev_priv, rps->boost_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001229 seq_printf(m, "Max freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001230 intel_gpu_freq(dev_priv, rps->max_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001231 seq_printf(m,
1232 "efficient (RPe) frequency: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001233 intel_gpu_freq(dev_priv, rps->efficient_freq));
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001234 } else {
Damien Lespiau267f0c92013-06-24 22:59:48 +01001235 seq_puts(m, "no P-state info available\n");
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001236 }
Jesse Barnesf97108d2010-01-29 11:27:07 -08001237
Ville Syrjälä49cd97a2017-02-07 20:33:45 +02001238 seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
Mika Kahola1170f282015-09-25 14:00:32 +03001239 seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
1240 seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
1241
Chris Wilsona0371212019-01-14 14:21:14 +00001242 intel_runtime_pm_put(dev_priv, wakeref);
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02001243 return ret;
Jesse Barnesf97108d2010-01-29 11:27:07 -08001244}
1245
Ben Widawskyd6369512016-09-20 16:54:32 +03001246static void i915_instdone_info(struct drm_i915_private *dev_priv,
1247 struct seq_file *m,
1248 struct intel_instdone *instdone)
1249{
Ben Widawskyf9e61372016-09-20 16:54:33 +03001250 int slice;
1251 int subslice;
1252
Ben Widawskyd6369512016-09-20 16:54:32 +03001253 seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1254 instdone->instdone);
1255
1256 if (INTEL_GEN(dev_priv) <= 3)
1257 return;
1258
1259 seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1260 instdone->slice_common);
1261
1262 if (INTEL_GEN(dev_priv) <= 6)
1263 return;
1264
Ben Widawskyf9e61372016-09-20 16:54:33 +03001265 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1266 seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1267 slice, subslice, instdone->sampler[slice][subslice]);
1268
1269 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1270 seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1271 slice, subslice, instdone->row[slice][subslice]);
Ben Widawskyd6369512016-09-20 16:54:32 +03001272}
1273
/*
 * debugfs: dump hangcheck state - global reset flags, whether the
 * hangcheck timer/work is pending, and per-engine seqno/ACTHD values as
 * last sampled by the hangcheck worker vs. their current readings.
 */
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	intel_wakeref_t wakeref;
	enum intel_engine_id id;

	seq_printf(m, "Reset flags: %lx\n", dev_priv->gpu_error.flags);
	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "\tWedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "\tDevice (global) reset in progress\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	/* Sample hardware state under a runtime-pm wakeref so the ACTHD
	 * and INSTDONE registers can actually be read. */
	with_intel_runtime_pm(dev_priv, wakeref) {
		for_each_engine(engine, dev_priv, id) {
			acthd[id] = intel_engine_get_active_head(engine);
			seqno[id] = intel_engine_get_seqno(engine);
		}

		/* INSTDONE is only captured for the render engine (RCS). */
		intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
	}

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x], %dms ago\n",
			   engine->hangcheck.seqno, seqno[id],
			   intel_engine_last_submit(engine),
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);

		if (engine->id == RCS) {
			/* "read" = snapshot taken above; "accu" = state
			 * accumulated by the hangcheck worker. */
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}
1341
Michel Thierry061d06a2017-06-20 10:57:49 +01001342static int i915_reset_info(struct seq_file *m, void *unused)
1343{
1344 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1345 struct i915_gpu_error *error = &dev_priv->gpu_error;
1346 struct intel_engine_cs *engine;
1347 enum intel_engine_id id;
1348
1349 seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1350
1351 for_each_engine(engine, dev_priv, id) {
1352 seq_printf(m, "%s = %u\n", engine->name,
1353 i915_reset_engine_count(error, engine));
1354 }
1355
1356 return 0;
1357}
1358
/*
 * Dump Ironlake (gen5) DRPC state: MEMMODECTL boost/frequency
 * programming, CRSTANDVID voltage IDs, and the current render-standby
 * (RSX) state decoded from RSTDBYCTL.
 */
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	/* FMIN occupies the low bits, hence no shift. */
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	/* Voltage IDs for render-standby states RS1/RS2 (6 bits each). */
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}
1415
Mika Kuoppalaf65367b2015-01-16 11:34:42 +02001416static int i915_forcewake_domains(struct seq_file *m, void *data)
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001417{
Chris Wilson233ebf52017-03-23 10:19:44 +00001418 struct drm_i915_private *i915 = node_to_i915(m->private);
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001419 struct intel_uncore_forcewake_domain *fw_domain;
Chris Wilsond2dc94b2017-03-23 10:19:41 +00001420 unsigned int tmp;
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001421
Chris Wilsond7a133d2017-09-07 14:44:41 +01001422 seq_printf(m, "user.bypass_count = %u\n",
1423 i915->uncore.user_forcewake.count);
1424
Chris Wilson233ebf52017-03-23 10:19:44 +00001425 for_each_fw_domain(fw_domain, i915, tmp)
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001426 seq_printf(m, "%s.wake_count = %u\n",
Tvrtko Ursulin33c582c2016-04-07 17:04:33 +01001427 intel_uncore_forcewake_domain_to_str(fw_domain->id),
Chris Wilson233ebf52017-03-23 10:19:44 +00001428 READ_ONCE(fw_domain->wake_count));
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001429
1430 return 0;
1431}
1432
Mika Kuoppala13628772017-03-15 17:43:02 +02001433static void print_rc6_res(struct seq_file *m,
1434 const char *title,
1435 const i915_reg_t reg)
1436{
1437 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1438
1439 seq_printf(m, "%s %u (%llu us)\n",
1440 title, I915_READ(reg),
1441 intel_rc6_residency_us(dev_priv, reg));
1442}
1443
/*
 * Dump Valleyview/Cherryview RC6 state: enablement bits from
 * GEN6_RC_CONTROL, render/media power-well status from
 * VLV_GTLC_PW_STATUS, and the RC6 residency counters.  Caller holds a
 * runtime-pm wakeref (see i915_drpc_info).
 */
static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rcctl1, pw_status;

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
				   GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

	/* Append the forcewake domain dump to the same debugfs file. */
	return i915_forcewake_domains(m, NULL);
}
1465
/*
 * Dump gen6+ RC state: RC1e/RC6/RC6p/RC6pp enable bits, the current RC
 * state decoded from GEN6_GT_CORE_STATUS, gen9 power-well gating, the
 * RC6 residency counters and (gen6/7 only) the RC6 voltage IDs fetched
 * via pcode.  Caller holds a runtime-pm wakeref (see i915_drpc_info).
 */
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	/* Raw (_FW) read bypasses forcewake; trace it manually. */
	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	if (INTEL_GEN(dev_priv) <= 7) {
		/* RC6 voltage IDs only exist on gen6/7.
		 * NOTE(review): the sandybridge_pcode_read() return value
		 * is ignored; rc6vids stays 0 if the pcode read fails. */
		mutex_lock(&dev_priv->pcu_lock);
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids);
		mutex_unlock(&dev_priv->pcu_lock);
	}

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		/* RC0 with cores powered down is reported separately. */
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	if (INTEL_GEN(dev_priv) <= 7) {
		/* One 8-bit VID per RC6 level, packed into rc6vids. */
		seq_printf(m, "RC6 voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	/* Append the forcewake domain dump to the same debugfs file. */
	return i915_forcewake_domains(m, NULL);
}
1553
/*
 * Top-level "drpc" debugfs entry: take a runtime-pm wakeref and
 * dispatch to the platform-specific dump.  VLV/CHV are checked before
 * the generic gen6+ path so they take their dedicated handler.
 */
static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	int err = -ENODEV;

	with_intel_runtime_pm(dev_priv, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			err = vlv_drpc_info(m);
		else if (INTEL_GEN(dev_priv) >= 6)
			err = gen6_drpc_info(m);
		else
			err = ironlake_drpc_info(m);
	}

	return err;
}
1571
Daniel Vetter9a851782015-06-18 10:30:22 +02001572static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1573{
David Weinehall36cdd012016-08-22 13:59:31 +03001574 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Daniel Vetter9a851782015-06-18 10:30:22 +02001575
1576 seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1577 dev_priv->fb_tracking.busy_bits);
1578
1579 seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1580 dev_priv->fb_tracking.flip_bits);
1581
1582 return 0;
1583}
1584
/*
 * debugfs: report FBC state - enabled/disabled (with the recorded
 * reason) and, when active, whether the hardware says it is currently
 * compressing, decoded from the platform-specific status register.
 */
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_fbc *fbc = &dev_priv->fbc;
	intel_wakeref_t wakeref;

	if (!HAS_FBC(dev_priv))
		return -ENODEV;

	/* Wakeref first, then fbc->lock, for the register reads below. */
	wakeref = intel_runtime_pm_get(dev_priv);
	mutex_lock(&fbc->lock);

	if (intel_fbc_is_active(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);

	if (intel_fbc_is_active(dev_priv)) {
		u32 mask;

		/* The compression-status register and mask vary per gen. */
		if (INTEL_GEN(dev_priv) >= 8)
			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 7)
			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 5)
			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
		else if (IS_G4X(dev_priv))
			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
		else
			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
							FBC_STAT_COMPRESSED);

		seq_printf(m, "Compressing: %s\n", yesno(mask));
	}

	mutex_unlock(&fbc->lock);
	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
1625
Ville Syrjälä4127dc42017-06-06 15:44:12 +03001626static int i915_fbc_false_color_get(void *data, u64 *val)
Rodrigo Vivida46f932014-08-01 02:04:45 -07001627{
David Weinehall36cdd012016-08-22 13:59:31 +03001628 struct drm_i915_private *dev_priv = data;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001629
David Weinehall36cdd012016-08-22 13:59:31 +03001630 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
Rodrigo Vivida46f932014-08-01 02:04:45 -07001631 return -ENODEV;
1632
Rodrigo Vivida46f932014-08-01 02:04:45 -07001633 *val = dev_priv->fbc.false_color;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001634
1635 return 0;
1636}
1637
/*
 * debugfs attribute setter: toggle the FBC false-color debug bit in
 * ILK_DPFC_CONTROL (read-modify-write under fbc.lock) and remember the
 * requested value in dev_priv->fbc.false_color.
 */
static int i915_fbc_false_color_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 reg;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	mutex_lock(&dev_priv->fbc.lock);

	reg = I915_READ(ILK_DPFC_CONTROL);
	dev_priv->fbc.false_color = val;

	I915_WRITE(ILK_DPFC_CONTROL, val ?
		   (reg | FBC_CTL_FALSE_COLOR) :
		   (reg & ~FBC_CTL_FALSE_COLOR));

	mutex_unlock(&dev_priv->fbc.lock);
	return 0;
}
1658
/* debugfs file ops for "i915_fbc_false_color": %llu get/set wrappers. */
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");
1662
/*
 * debugfs: report IPS (Intermediate Pixel Storage) state - the module
 * parameter and, on pre-gen8, the live IPS_CTL enable bit.  On gen8+
 * the code only reports "unknown" - presumably the enable bit cannot
 * be read back reliably there (NOTE(review): confirm).
 */
static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	if (!HAS_IPS(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(i915_modparams.enable_ips));

	if (INTEL_GEN(dev_priv) >= 8) {
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (I915_READ(IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
1689
/*
 * debugfs: report whether self-refresh (SR) is enabled, probing the
 * platform-specific self-refresh enable bit.  gen9+ has no global SR
 * status (per-plane watermarks only), so sr_enabled stays false there.
 */
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	bool sr_enabled = false;

	/* Display power (not just GT runtime-pm) is needed for the reads. */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
1718
Jesse Barnes7648fa92010-05-20 14:28:11 -07001719static int i915_emon_status(struct seq_file *m, void *unused)
1720{
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001721 struct drm_i915_private *i915 = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00001722 intel_wakeref_t wakeref;
Chris Wilsonde227ef2010-07-03 07:58:38 +01001723
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001724 if (!IS_GEN(i915, 5))
Chris Wilson582be6b2012-04-30 19:35:02 +01001725 return -ENODEV;
1726
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001727 with_intel_runtime_pm(i915, wakeref) {
1728 unsigned long temp, chipset, gfx;
Jesse Barnes7648fa92010-05-20 14:28:11 -07001729
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001730 temp = i915_mch_val(i915);
1731 chipset = i915_chipset_val(i915);
1732 gfx = i915_gfx_val(i915);
Chris Wilsona0371212019-01-14 14:21:14 +00001733
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001734 seq_printf(m, "GMCH temp: %ld\n", temp);
1735 seq_printf(m, "Chipset power: %ld\n", chipset);
1736 seq_printf(m, "GFX power: %ld\n", gfx);
1737 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1738 }
Jesse Barnes7648fa92010-05-20 14:28:11 -07001739
1740 return 0;
1741}
1742
/*
 * debugfs: print the GPU-frequency to effective CPU/ring frequency
 * mapping, querying pcode (GEN6_PCODE_READ_MIN_FREQ_TABLE) once per
 * GPU frequency step.  LLC platforms only.
 */
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	intel_wakeref_t wakeref;
	int gpu_freq, ia_freq;
	int ret;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(dev_priv);

	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		goto out;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		/* pcode replies in-place: byte 0 = CPU freq, byte 1 = ring
		 * freq, both in 100 MHz units. */
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ||
						      INTEL_GEN(dev_priv) >= 10 ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	intel_runtime_pm_put(dev_priv, wakeref);
	return ret;
}
1791
Chris Wilson44834a62010-08-19 16:09:23 +01001792static int i915_opregion(struct seq_file *m, void *unused)
1793{
David Weinehall36cdd012016-08-22 13:59:31 +03001794 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1795 struct drm_device *dev = &dev_priv->drm;
Chris Wilson44834a62010-08-19 16:09:23 +01001796 struct intel_opregion *opregion = &dev_priv->opregion;
1797 int ret;
1798
1799 ret = mutex_lock_interruptible(&dev->struct_mutex);
1800 if (ret)
Daniel Vetter0d38f002012-04-21 22:49:10 +02001801 goto out;
Chris Wilson44834a62010-08-19 16:09:23 +01001802
Jani Nikula2455a8e2015-12-14 12:50:53 +02001803 if (opregion->header)
1804 seq_write(m, opregion->header, OPREGION_SIZE);
Chris Wilson44834a62010-08-19 16:09:23 +01001805
1806 mutex_unlock(&dev->struct_mutex);
1807
Daniel Vetter0d38f002012-04-21 22:49:10 +02001808out:
Chris Wilson44834a62010-08-19 16:09:23 +01001809 return 0;
1810}
1811
Jani Nikulaada8f952015-12-15 13:17:12 +02001812static int i915_vbt(struct seq_file *m, void *unused)
1813{
David Weinehall36cdd012016-08-22 13:59:31 +03001814 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
Jani Nikulaada8f952015-12-15 13:17:12 +02001815
1816 if (opregion->vbt)
1817 seq_write(m, opregion->vbt, opregion->vbt_size);
1818
1819 return 0;
1820}
1821
/*
 * debugfs: describe every framebuffer - the fbdev/fbcon one first
 * (when fbdev emulation is built in), then all user-created ones.
 * Takes struct_mutex for describe_obj() and mode_config.fb_lock for
 * the framebuffer list walk.
 */
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
		seq_putc(m, '\n');
	}
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
		/* Already printed above as the fbcon framebuffer. */
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, intel_fb_obj(&fb->base));
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1871
/*
 * Append a one-line summary of a context ring's bookkeeping (free space,
 * head/tail offsets and the emit cursor) to the seq_file. Helper for
 * i915_context_status(); the leading space continues an existing line.
 */
static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
		   ring->space, ring->head, ring->tail, ring->emit);
}
1877
/*
 * debugfs: dump every GEM context known to the device.
 *
 * For each context print its HW id (and pin count) when assigned, the
 * owning process (or "(kernel)"/"(deleted)"), the remap-slice flag, and a
 * per-engine line describing the context state object and its ring.
 *
 * Walks dev_priv->contexts.list under struct_mutex; returns 0 or the
 * -EINTR-style error from mutex_lock_interruptible().
 */
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		seq_puts(m, "HW context ");
		/* hw_id_link is non-empty only while a HW id is assigned */
		if (!list_empty(&ctx->hw_id_link))
			seq_printf(m, "%x [pin %u]", ctx->hw_id,
				   atomic_read(&ctx->hw_id_pin_count));
		if (ctx->pid) {
			struct task_struct *task;

			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		/* 'R' when the context requires an L3 slice remap */
		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_engine(engine, dev_priv, id) {
			struct intel_context *ce =
				to_intel_context(ctx, engine);

			seq_printf(m, "%s: ", engine->name);
			if (ce->state)
				describe_obj(m, ce->state->obj);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1933
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001934static const char *swizzle_string(unsigned swizzle)
1935{
Damien Lespiauaee56cf2013-06-24 22:59:49 +01001936 switch (swizzle) {
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001937 case I915_BIT_6_SWIZZLE_NONE:
1938 return "none";
1939 case I915_BIT_6_SWIZZLE_9:
1940 return "bit9";
1941 case I915_BIT_6_SWIZZLE_9_10:
1942 return "bit9/bit10";
1943 case I915_BIT_6_SWIZZLE_9_11:
1944 return "bit9/bit11";
1945 case I915_BIT_6_SWIZZLE_9_10_11:
1946 return "bit9/bit10/bit11";
1947 case I915_BIT_6_SWIZZLE_9_17:
1948 return "bit9/bit17";
1949 case I915_BIT_6_SWIZZLE_9_10_17:
1950 return "bit9/bit10/bit17";
1951 case I915_BIT_6_SWIZZLE_UNKNOWN:
Masanari Iida8a168ca2012-12-29 02:00:09 +09001952 return "unknown";
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001953 }
1954
1955 return "bug";
1956}
1957
/*
 * debugfs: report the detected bit-6 swizzle modes for X/Y tiling and dump
 * the memory-controller registers they were derived from (generation
 * dependent). Holds a runtime-pm wakeref around the register reads.
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		/* gen3/4: DRAM controller config registers */
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		/* gen6+: per-channel DIMM config plus arbiter state */
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
2005
Chris Wilson7466c292016-08-15 09:49:33 +01002006static const char *rps_power_to_str(unsigned int power)
2007{
2008 static const char * const strings[] = {
2009 [LOW_POWER] = "low power",
2010 [BETWEEN] = "mixed",
2011 [HIGH_POWER] = "high power",
2012 };
2013
2014 if (power >= ARRAY_SIZE(strings) || !strings[power])
2015 return "unknown";
2016
2017 return strings[power];
2018}
2019
/*
 * debugfs: dump RPS (render power state / GPU frequency) bookkeeping:
 * requested vs actual frequency, soft/hard limits, waitboost counters and,
 * when RPS is active, the up/down autotuning averages.
 *
 * The actual frequency is only read from hardware if the device is already
 * awake (with_intel_runtime_pm_if_in_use); otherwise the cached cur_freq
 * is reported.
 */
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 act_freq = rps->cur_freq;
	intel_wakeref_t wakeref;

	with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			/* VLV/CHV report frequency via the punit sideband */
			mutex_lock(&dev_priv->pcu_lock);
			act_freq = vlv_punit_read(dev_priv,
						  PUNIT_REG_GPU_FREQ_STS);
			act_freq = (act_freq >> 8) & 0xff;
			mutex_unlock(&dev_priv->pcu_lock);
		} else {
			act_freq = intel_get_cagf(dev_priv,
						  I915_READ(GEN6_RPSTAT1));
		}
	}

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s [%d requests]\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(dev_priv, rps->cur_freq),
		   intel_gpu_freq(dev_priv, act_freq));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, rps->min_freq),
		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, rps->idle_freq),
		   intel_gpu_freq(dev_priv, rps->efficient_freq),
		   intel_gpu_freq(dev_priv, rps->boost_freq));

	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));

	if (INTEL_GEN(dev_priv) >= 6 &&
	    rps->enabled &&
	    dev_priv->gt.active_requests) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		/* _FW reads require an explicit forcewake bracket */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
2088
Ben Widawsky63573eb2013-07-04 11:02:07 -07002089static int i915_llc(struct seq_file *m, void *data)
2090{
David Weinehall36cdd012016-08-22 13:59:31 +03002091 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Mika Kuoppala3accaf72016-04-13 17:26:43 +03002092 const bool edram = INTEL_GEN(dev_priv) > 8;
Ben Widawsky63573eb2013-07-04 11:02:07 -07002093
David Weinehall36cdd012016-08-22 13:59:31 +03002094 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
Mika Kuoppala3accaf72016-04-13 17:26:43 +03002095 seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2096 intel_uncore_edram_size(dev_priv)/1024/1024);
Ben Widawsky63573eb2013-07-04 11:02:07 -07002097
2098 return 0;
2099}
2100
/*
 * debugfs: dump HuC firmware fetch/load state and, while holding a
 * runtime-pm wakeref, the live HUC_STATUS2 register.
 * Returns -ENODEV on hardware without a HuC.
 */
static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct drm_printer p;

	if (!HAS_HUC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->huc.fw, &p);

	with_intel_runtime_pm(dev_priv, wakeref)
		seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));

	return 0;
}
2118
/*
 * debugfs: dump GuC firmware fetch/load state, the decoded GUC_STATUS
 * register (bootrom / uKernel / MIA core fields) and the 16 GuC soft
 * scratch registers. Register reads are wrapped in a runtime-pm wakeref.
 * Returns -ENODEV on hardware without a GuC.
 */
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct drm_printer p;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->guc.fw, &p);

	with_intel_runtime_pm(dev_priv, wakeref) {
		u32 tmp = I915_READ(GUC_STATUS);
		u32 i;

		seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
		seq_printf(m, "\tBootrom status = 0x%x\n",
			   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
		seq_printf(m, "\tuKernel status = 0x%x\n",
			   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
		seq_printf(m, "\tMIA Core status = 0x%x\n",
			   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
		seq_puts(m, "\nScratch registers:\n");
		for (i = 0; i < 16; i++) {
			seq_printf(m, "\t%2d: \t0x%x\n",
				   i, I915_READ(SOFT_SCRATCH(i)));
		}
	}

	return 0;
}
2151
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01002152static const char *
2153stringify_guc_log_type(enum guc_log_buffer_type type)
2154{
2155 switch (type) {
2156 case GUC_ISR_LOG_BUFFER:
2157 return "ISR";
2158 case GUC_DPC_LOG_BUFFER:
2159 return "DPC";
2160 case GUC_CRASH_DUMP_LOG_BUFFER:
2161 return "CRASH";
2162 default:
2163 MISSING_CASE(type);
2164 }
2165
2166 return "";
2167}
2168
/*
 * Print GuC log relay statistics (relay-full count plus per-buffer-type
 * flush/overflow counters). Helper for i915_guc_info(); prints a single
 * "disabled" line when the relay is not enabled.
 */
static void i915_guc_log_info(struct seq_file *m,
			      struct drm_i915_private *dev_priv)
{
	struct intel_guc_log *log = &dev_priv->guc.log;
	enum guc_log_buffer_type type;

	if (!intel_guc_log_relay_enabled(log)) {
		seq_puts(m, "GuC log relay disabled\n");
		return;
	}

	seq_puts(m, "GuC logging stats:\n");

	seq_printf(m, "\tRelay full count: %u\n",
		   log->relay.full_count);

	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
		seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
			   stringify_guc_log_type(type),
			   log->stats[type].flush,
			   log->stats[type].sampled_overflow);
	}
}
2192
/*
 * Print one GuC client's bookkeeping: priority, stage index, process
 * descriptor offset, doorbell assignment and per-engine (plus total)
 * submission counts. Helper for i915_guc_info().
 */
static void i915_guc_client_info(struct seq_file *m,
				 struct drm_i915_private *dev_priv,
				 struct intel_guc_client *client)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u64 tot = 0;

	seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
		   client->priority, client->stage_id, client->proc_desc_offset);
	seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
		   client->doorbell_id, client->doorbell_offset);

	for_each_engine(engine, dev_priv, id) {
		u64 submissions = client->submissions[id];
		tot += submissions;
		seq_printf(m, "\tSubmissions: %llu %s\n",
			   submissions, engine->name);
	}
	seq_printf(m, "\tTotal: %llu\n", tot);
}
2214
/*
 * debugfs: top-level GuC state dump. Always prints log relay stats; when
 * GuC submission is in use, also prints the doorbell map and the execbuf
 * (and optional preempt) client details.
 * Returns -ENODEV when the GuC is not used at all.
 */
static int i915_guc_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;

	if (!USES_GUC(dev_priv))
		return -ENODEV;

	i915_guc_log_info(m, dev_priv);

	/* Submission-specific state below; logging-only setups stop here */
	if (!USES_GUC_SUBMISSION(dev_priv))
		return 0;

	GEM_BUG_ON(!guc->execbuf_client);

	seq_printf(m, "\nDoorbell map:\n");
	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
	seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);

	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
	i915_guc_client_info(m, dev_priv, guc->execbuf_client);
	if (guc->preempt_client) {
		seq_printf(m, "\nGuC preempt client @ %p:\n",
			   guc->preempt_client);
		i915_guc_client_info(m, dev_priv, guc->preempt_client);
	}

	/* Add more as required ... */

	return 0;
}
2246
/*
 * debugfs: walk the GuC stage descriptor pool and dump every active
 * descriptor, including per-engine execlist (LRC) context details for the
 * engines used by the execbuf client.
 * Returns -ENODEV when GuC submission is not in use.
 */
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;
	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
	struct intel_guc_client *client = guc->execbuf_client;
	unsigned int tmp;
	int index;

	if (!USES_GUC_SUBMISSION(dev_priv))
		return -ENODEV;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		/* Only active descriptors are meaningful */
		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
				&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}
2300
/*
 * debugfs: hex-dump a GuC log buffer, four u32s per line. The info_ent
 * data pointer selects the load-error log (non-NULL) vs the runtime log.
 * The backing object is pinned WC for the dump and unpinned afterwards.
 * Returns 0 when no log object exists, or the pin error on failure.
 */
static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	bool dump_load_err = !!node->info_ent->data;
	struct drm_i915_gem_object *obj = NULL;
	u32 *log;
	int i = 0;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	if (dump_load_err)
		obj = dev_priv->guc.load_err_log;
	else if (dev_priv->guc.log.vma)
		obj = dev_priv->guc.log.vma->obj;

	if (!obj)
		return 0;

	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(log)) {
		DRM_DEBUG("Failed to pin object\n");
		seq_puts(m, "(log data unaccessible)\n");
		return PTR_ERR(log);
	}

	/* Object size is page-aligned, so groups of 4 u32s stay in bounds */
	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			   *(log + i), *(log + i + 1),
			   *(log + i + 2), *(log + i + 3));

	seq_putc(m, '\n');

	i915_gem_object_unpin_map(obj);

	return 0;
}
2339
Michał Winiarski4977a282018-03-19 10:53:40 +01002340static int i915_guc_log_level_get(void *data, u64 *val)
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302341{
Chris Wilsonbcc36d82017-04-07 20:42:20 +01002342 struct drm_i915_private *dev_priv = data;
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302343
Michał Winiarski86aa8242018-03-08 16:46:53 +01002344 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002345 return -ENODEV;
2346
Piotr Piórkowski50935ac2018-06-04 16:19:41 +02002347 *val = intel_guc_log_get_level(&dev_priv->guc.log);
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302348
2349 return 0;
2350}
2351
Michał Winiarski4977a282018-03-19 10:53:40 +01002352static int i915_guc_log_level_set(void *data, u64 val)
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302353{
Chris Wilsonbcc36d82017-04-07 20:42:20 +01002354 struct drm_i915_private *dev_priv = data;
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302355
Michał Winiarski86aa8242018-03-08 16:46:53 +01002356 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002357 return -ENODEV;
2358
Piotr Piórkowski50935ac2018-06-04 16:19:41 +02002359 return intel_guc_log_set_level(&dev_priv->guc.log, val);
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302360}
2361
/* Simple u64 debugfs attribute wrapping the GuC log level get/set above. */
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
			i915_guc_log_level_get, i915_guc_log_level_set,
			"%lld\n");
2365
Michał Winiarski4977a282018-03-19 10:53:40 +01002366static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2367{
2368 struct drm_i915_private *dev_priv = inode->i_private;
2369
2370 if (!USES_GUC(dev_priv))
2371 return -ENODEV;
2372
2373 file->private_data = &dev_priv->guc.log;
2374
2375 return intel_guc_log_relay_open(&dev_priv->guc.log);
2376}
2377
/*
 * debugfs write hook: any write to the relay control file forces a flush
 * of the GuC log buffers into the relay. The user buffer contents are
 * deliberately ignored; the full count is reported as consumed.
 */
static ssize_t
i915_guc_log_relay_write(struct file *filp,
			 const char __user *ubuf,
			 size_t cnt,
			 loff_t *ppos)
{
	struct intel_guc_log *log = filp->private_data;

	intel_guc_log_relay_flush(log);

	return cnt;
}
2390
2391static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2392{
2393 struct drm_i915_private *dev_priv = inode->i_private;
2394
2395 intel_guc_log_relay_close(&dev_priv->guc.log);
2396
2397 return 0;
2398}
2399
/* File operations for the GuC log relay control file: open starts the
 * relay, write forces a flush, release tears it down. */
static const struct file_operations i915_guc_log_relay_fops = {
	.owner = THIS_MODULE,
	.open = i915_guc_log_relay_open,
	.write = i915_guc_log_relay_write,
	.release = i915_guc_log_relay_release,
};
2406
/*
 * debugfs (per-connector): read the sink's PSR status over DPCD and print
 * it decoded. Returns -ENODEV when PSR is unsupported or the connector is
 * disconnected, or the drm_dp_dpcd_readb() error on a failed AUX read.
 */
static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
	u8 val;
	/* Indexed by the DP_PSR_SINK_STATE_MASK field of DP_PSR_STATUS */
	static const char * const sink_status[] = {
		"inactive",
		"transition to active, capture and display",
		"active, display from RFB",
		"active, capture and display on sink device timings",
		"transition to inactive, capture and display, timing re-sync",
		"reserved",
		"reserved",
		"sink internal error",
	};
	struct drm_connector *connector = m->private;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	int ret;

	if (!CAN_PSR(dev_priv)) {
		seq_puts(m, "PSR Unsupported\n");
		return -ENODEV;
	}

	if (connector->status != connector_status_connected)
		return -ENODEV;

	/* drm_dp_dpcd_readb() returns the number of bytes read (1) on success */
	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);

	if (ret == 1) {
		const char *str = "unknown";

		val &= DP_PSR_SINK_STATE_MASK;
		if (val < ARRAY_SIZE(sink_status))
			str = sink_status[val];
		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
	} else {
		return ret;
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2450
/*
 * Print the source-side PSR hardware state, decoded from the PSR2 or PSR1
 * status register depending on which mode is enabled. Helper for
 * i915_edp_psr_status(); unknown state fields decode as "unknown".
 */
static void
psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
{
	u32 val, status_val;
	const char *status = "unknown";

	if (dev_priv->psr.psr2_enabled) {
		static const char * const live_status[] = {
			"IDLE",
			"CAPTURE",
			"CAPTURE_FS",
			"SLEEP",
			"BUFON_FW",
			"ML_UP",
			"SU_STANDBY",
			"FAST_SLEEP",
			"DEEP_SLEEP",
			"BUF_ON",
			"TG_ON"
		};
		val = I915_READ(EDP_PSR2_STATUS);
		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
			      EDP_PSR2_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	} else {
		static const char * const live_status[] = {
			"IDLE",
			"SRDONACK",
			"SRDENT",
			"BUFOFF",
			"BUFON",
			"AUXACK",
			"SRDOFFACK",
			"SRDENT_ON",
		};
		val = I915_READ(EDP_PSR_STATUS);
		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
			      EDP_PSR_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	}

	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
}
2496
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03002497static int i915_edp_psr_status(struct seq_file *m, void *data)
2498{
David Weinehall36cdd012016-08-22 13:59:31 +03002499 struct drm_i915_private *dev_priv = node_to_i915(m->private);
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002500 struct i915_psr *psr = &dev_priv->psr;
Chris Wilsona0371212019-01-14 14:21:14 +00002501 intel_wakeref_t wakeref;
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002502 const char *status;
2503 bool enabled;
2504 u32 val;
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03002505
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002506 if (!HAS_PSR(dev_priv))
2507 return -ENODEV;
Damien Lespiau3553a8e2015-03-09 14:17:58 +00002508
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002509 seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
2510 if (psr->dp)
2511 seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
2512 seq_puts(m, "\n");
2513
2514 if (!psr->sink_support)
Dhinakaran Pandiyanc9ef2912018-01-03 13:38:24 -08002515 return 0;
2516
Chris Wilsona0371212019-01-14 14:21:14 +00002517 wakeref = intel_runtime_pm_get(dev_priv);
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002518 mutex_lock(&psr->lock);
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02002519
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002520 if (psr->enabled)
2521 status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
Dhinakaran Pandiyance3508f2018-05-11 16:00:59 -07002522 else
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002523 status = "disabled";
2524 seq_printf(m, "PSR mode: %s\n", status);
Rodrigo Vivi60e5ffe2016-02-01 12:02:07 -08002525
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002526 if (!psr->enabled)
2527 goto unlock;
Rodrigo Vivi60e5ffe2016-02-01 12:02:07 -08002528
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002529 if (psr->psr2_enabled) {
2530 val = I915_READ(EDP_PSR2_CTL);
2531 enabled = val & EDP_PSR2_ENABLE;
2532 } else {
2533 val = I915_READ(EDP_PSR_CTL);
2534 enabled = val & EDP_PSR_ENABLE;
2535 }
2536 seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
2537 enableddisabled(enabled), val);
2538 psr_source_status(dev_priv, m);
2539 seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
2540 psr->busy_frontbuffer_bits);
Rodrigo Vivia6cbdb82014-11-14 08:52:40 -08002541
Rodrigo Vivi05eec3c2015-11-23 14:16:40 -08002542 /*
Rodrigo Vivi05eec3c2015-11-23 14:16:40 -08002543 * SKL+ Perf counter is reset to 0 everytime DC state is entered
2544 */
David Weinehall36cdd012016-08-22 13:59:31 +03002545 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002546 val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
2547 seq_printf(m, "Performance counter: %u\n", val);
Rodrigo Vivia6cbdb82014-11-14 08:52:40 -08002548 }
Nagaraju, Vathsala6ba1f9e2017-01-06 22:02:32 +05302549
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002550 if (psr->debug & I915_PSR_DEBUG_IRQ) {
Dhinakaran Pandiyan3f983e542018-04-03 14:24:20 -07002551 seq_printf(m, "Last attempted entry at: %lld\n",
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002552 psr->last_entry_attempt);
2553 seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
Dhinakaran Pandiyan3f983e542018-04-03 14:24:20 -07002554 }
2555
José Roberto de Souzaa81f7812019-01-17 12:55:48 -08002556 if (psr->psr2_enabled) {
2557 u32 su_frames_val[3];
2558 int frame;
2559
2560 /*
2561 * Reading all 3 registers before hand to minimize crossing a
2562 * frame boundary between register reads
2563 */
2564 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
2565 su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));
2566
2567 seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
2568
2569 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
2570 u32 su_blocks;
2571
2572 su_blocks = su_frames_val[frame / 3] &
2573 PSR2_SU_STATUS_MASK(frame);
2574 su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
2575 seq_printf(m, "%d\t%d\n", frame, su_blocks);
2576 }
2577 }
2578
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002579unlock:
2580 mutex_unlock(&psr->lock);
Chris Wilsona0371212019-01-14 14:21:14 +00002581 intel_runtime_pm_put(dev_priv, wakeref);
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002582
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03002583 return 0;
2584}
2585
/*
 * Debugfs write handler for i915_edp_psr_debug: apply a new PSR debug
 * control value. Holds a runtime-PM wakeref for the duration so the
 * hardware can be reprogrammed immediately rather than on next wake.
 */
static int
i915_edp_psr_debug_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;
	int ret;

	/* PSR must be supported and enabled in the source for this to mean anything. */
	if (!CAN_PSR(dev_priv))
		return -ENODEV;

	DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);

	/* Keep the device awake while PSR state is reprogrammed. */
	wakeref = intel_runtime_pm_get(dev_priv);

	ret = intel_psr_debug_set(dev_priv, val);

	intel_runtime_pm_put(dev_priv, wakeref);

	return ret;
}
2606
2607static int
2608i915_edp_psr_debug_get(void *data, u64 *val)
2609{
2610 struct drm_i915_private *dev_priv = data;
2611
2612 if (!CAN_PSR(dev_priv))
2613 return -ENODEV;
2614
2615 *val = READ_ONCE(dev_priv->psr.debug);
2616 return 0;
2617}
2618
/* Single-u64 debugfs file wiring together the PSR debug get/set handlers. */
DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");
2622
/*
 * Report the accumulated GPU energy consumption in microjoules.
 * The scaling units come from the RAPL power-unit MSR; the raw counter
 * is read from the MCH energy status register while holding a wakeref.
 */
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	unsigned long long power;
	intel_wakeref_t wakeref;
	u32 units;

	/* Energy reporting only exists on gen6+. */
	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	/* rdmsrl_safe() tolerates platforms where the MSR is absent. */
	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
		return -ENODEV;

	/* Bits 12:8 hold the energy status units (2^-units J per tick, per Intel SDM). */
	units = (power & 0x1f00) >> 8;
	with_intel_runtime_pm(dev_priv, wakeref)
		power = I915_READ(MCH_SECP_NRG_STTS);

	power = (1000000 * power) >> units; /* convert to uJ */
	seq_printf(m, "%llu", power);

	return 0;
}
2645
/*
 * Dump the runtime power management state of the device: driver wakeref
 * status, GT idleness, IRQ state, core PM usage count and the PCI device
 * power state. Optionally dumps wakeref tracking when the debug config
 * option is enabled.
 */
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/* Note: informational only; we keep dumping the rest regardless. */
	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "Runtime power status: %s\n",
		   enableddisabled(!dev_priv->power_domains.wakeref));

	seq_printf(m, "GPU idle: %s (epoch %u)\n",
		   yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	/* With runtime-PM debugging built in, also dump the wakeref tracking. */
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
		struct drm_printer p = drm_seq_file_printer(m);

		print_intel_runtime_pm_wakeref(dev_priv, &p);
	}

	return 0;
}
2679
/*
 * List every power well with its use count, and under each well the
 * display power domains it serves with their per-domain use counts.
 * The power_domains lock is held across the walk to keep counts coherent.
 */
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", power_well->desc->name,
			   power_well->count);

		/* Indented sub-listing: domains backed by this well. */
		for_each_power_domain(power_domain, power_well->desc->domains)
			seq_printf(m, "  %-23s %d\n",
				   intel_display_power_domain_str(power_domain),
				   power_domains->domain_use_count[power_domain]);
	}

	mutex_unlock(&power_domains->lock);

	return 0;
}
2707
/*
 * Dump CSR/DMC firmware state: whether a payload is loaded, its path and
 * version, plus DC-state transition counters and the firmware base
 * registers. A wakeref is held for the register reads.
 */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv))
		return -ENODEV;

	csr = &dev_priv->csr;

	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	/* Without a payload there is no version/counter info; still dump bases. */
	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	/* Counter register layout is only known up to gen11. */
	if (WARN_ON(INTEL_GEN(dev_priv) > 11))
		goto out;

	seq_printf(m, "DC3 -> DC5 count: %d\n",
		   I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
						    SKL_CSR_DC3_DC5_COUNT));
	/* DC6 does not exist on the gen9 LP (Broxton-class) parts. */
	if (!IS_GEN9_LP(dev_priv))
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
2749
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002750static void intel_seq_print_mode(struct seq_file *m, int tabs,
2751 struct drm_display_mode *mode)
2752{
2753 int i;
2754
2755 for (i = 0; i < tabs; i++)
2756 seq_putc(m, '\t');
2757
Shayenne Moura4fb6bb82018-12-20 10:27:57 -02002758 seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002759}
2760
/*
 * Print one encoder attached to @intel_crtc and, indented underneath,
 * each connector currently routed through it. For connected connectors
 * the CRTC's current mode is dumped as well.
 */
static void intel_encoder_info(struct seq_file *m,
			       struct intel_crtc *intel_crtc,
			       struct intel_encoder *intel_encoder)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_connector *intel_connector;
	struct drm_encoder *encoder;

	encoder = &intel_encoder->base;
	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
		   encoder->base.id, encoder->name);
	for_each_connector_on_encoder(dev, encoder, intel_connector) {
		struct drm_connector *connector = &intel_connector->base;
		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
			   connector->base.id,
			   connector->name,
			   drm_get_connector_status_name(connector->status));
		if (connector->status == connector_status_connected) {
			/* Mode comes from the CRTC, not the connector probe list. */
			struct drm_display_mode *mode = &crtc->mode;
			seq_printf(m, ", mode:\n");
			intel_seq_print_mode(m, 2, mode);
		} else {
			seq_putc(m, '\n');
		}
	}
}
2789
/*
 * Print the primary-plane framebuffer of @intel_crtc (or note that the
 * primary plane is disabled), then recurse into every encoder on the CRTC.
 */
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_encoder *intel_encoder;
	struct drm_plane_state *plane_state = crtc->primary->state;
	struct drm_framebuffer *fb = plane_state->fb;

	if (fb)
		/* src_x/src_y are 16.16 fixed point; shift to integer pixels. */
		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
			   fb->base.id, plane_state->src_x >> 16,
			   plane_state->src_y >> 16, fb->width, fb->height);
	else
		seq_puts(m, "\tprimary plane disabled\n");
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		intel_encoder_info(m, intel_crtc, intel_encoder);
}
2808
2809static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2810{
2811 struct drm_display_mode *mode = panel->fixed_mode;
2812
2813 seq_printf(m, "\tfixed mode:\n");
2814 intel_seq_print_mode(m, 2, mode);
2815}
2816
/*
 * Dump DisplayPort-specific connector info: DPCD revision, audio
 * capability, the fixed panel mode for eDP, and the downstream-port
 * details via the DRM DP helper.
 */
static void intel_dp_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
	/* Only eDP carries a fixed panel mode worth printing. */
	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_info(m, &intel_connector->panel);

	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
				&intel_dp->aux);
}
2831
/*
 * Dump the MST-branch view of a DP connector: audio capability is
 * queried per-port through the MST topology manager rather than from
 * the sink's DPCD directly.
 */
static void intel_dp_mst_info(struct seq_file *m,
			      struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp_mst_encoder *intel_mst =
		enc_to_mst(&intel_encoder->base);
	struct intel_digital_port *intel_dig_port = intel_mst->primary;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
						   intel_connector->port);

	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
}
2845
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002846static void intel_hdmi_info(struct seq_file *m,
2847 struct intel_connector *intel_connector)
2848{
2849 struct intel_encoder *intel_encoder = intel_connector->encoder;
2850 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2851
Jani Nikula742f4912015-09-03 11:16:09 +03002852 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002853}
2854
2855static void intel_lvds_info(struct seq_file *m,
2856 struct intel_connector *intel_connector)
2857{
2858 intel_panel_info(m, &intel_connector->panel);
2859}
2860
/*
 * Print everything known about one connector: identity and status,
 * EDID-derived display info, then type-specific detail (DP/MST, LVDS,
 * HDMI) and finally the probed mode list. Bails out early for
 * disconnected connectors or connectors without an attached encoder.
 */
static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));

	if (connector->status == connector_status_disconnected)
		return;

	seq_printf(m, "\tname: %s\n", connector->display_info.name);
	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
		   connector->display_info.width_mm,
		   connector->display_info.height_mm);
	seq_printf(m, "\tsubpixel order: %s\n",
		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);

	/* MST connectors can momentarily lack an encoder; nothing more to say. */
	if (!intel_encoder)
		return;

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
			intel_dp_mst_info(m, intel_connector);
		else
			intel_dp_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		/* HDMI may be driven by a dedicated HDMI encoder or a DDI port. */
		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
		    intel_encoder->type == INTEL_OUTPUT_DDI)
			intel_hdmi_info(m, intel_connector);
		break;
	default:
		break;
	}

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}
2911
/* Map a DRM plane type to the short tag used in the plane listing. */
static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}
2929
/*
 * Format a plane rotation bitmask into @buf (at most @bufsize bytes,
 * NUL-terminated by snprintf). Each set rotation/reflection bit is
 * spelled out, followed by the raw hex value.
 */
static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
{
	/*
	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused
	 */
	snprintf(buf, bufsize,
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
		 rotation);
}
2946
/*
 * Dump every plane attached to @intel_crtc: id, type, CRTC placement,
 * source rectangle (16.16 fixed point, printed to four decimal places),
 * pixel format and rotation.
 */
static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_plane *intel_plane;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane_state *state;
		struct drm_plane *plane = &intel_plane->base;
		struct drm_format_name_buf format_name;
		char rot_str[48];

		if (!plane->state) {
			seq_puts(m, "plane->state is NULL!\n");
			continue;
		}

		state = plane->state;

		if (state->fb) {
			drm_get_format_name(state->fb->format->format,
					    &format_name);
		} else {
			sprintf(format_name.str, "N/A");
		}

		plane_rotation(rot_str, sizeof(rot_str), state->rotation);

		/*
		 * Fractional part of the 16.16 src coords: (frac * 15625) >> 10
		 * == frac * 10000 / 65536, i.e. four decimal digits.
		 */
		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
			   plane->base.id,
			   plane_type(intel_plane->base.type),
			   state->crtc_x, state->crtc_y,
			   state->crtc_w, state->crtc_h,
			   (state->src_x >> 16),
			   ((state->src_x & 0xffff) * 15625) >> 10,
			   (state->src_y >> 16),
			   ((state->src_y & 0xffff) * 15625) >> 10,
			   (state->src_w >> 16),
			   ((state->src_w & 0xffff) * 15625) >> 10,
			   (state->src_h >> 16),
			   ((state->src_h & 0xffff) * 15625) >> 10,
			   format_name.str,
			   rot_str);
	}
}
2992
/*
 * Dump the pipe scaler state for @intel_crtc: user bitmask, active
 * scaler id and the per-scaler use/mode fields.
 */
static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct intel_crtc_state *pipe_config;
	int num_scalers = intel_crtc->num_scalers;
	int i;

	pipe_config = to_intel_crtc_state(intel_crtc->base.state);

	/* Not all platforms have a scaler */
	if (num_scalers) {
		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
			   num_scalers,
			   pipe_config->scaler_state.scaler_users,
			   pipe_config->scaler_state.scaler_id);

		for (i = 0; i < num_scalers; i++) {
			struct intel_scaler *sc =
				&pipe_config->scaler_state.scalers[i];

			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
				   i, yesno(sc->in_use), sc->mode);
		}
		seq_puts(m, "\n");
	} else {
		seq_puts(m, "\tNo scalers available on this platform\n");
	}
}
3020
/*
 * Top-level display state dump: every CRTC (with planes, scalers,
 * cursor and underrun reporting state) followed by every connector.
 * Each CRTC is dumped under its own modeset lock; the connector walk
 * holds mode_config.mutex. A wakeref keeps the device awake for the
 * register-backed helpers called underneath.
 */
static int i915_display_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "CRTC info\n");
	seq_printf(m, "---------\n");
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *pipe_config;

		/* Per-CRTC lock so the state we print is self-consistent. */
		drm_modeset_lock(&crtc->base.mutex, NULL);
		pipe_config = to_intel_crtc_state(crtc->base.state);

		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
			   crtc->base.base.id, pipe_name(crtc->pipe),
			   yesno(pipe_config->base.active),
			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
			   yesno(pipe_config->dither), pipe_config->pipe_bpp);

		if (pipe_config->base.active) {
			struct intel_plane *cursor =
				to_intel_plane(crtc->base.cursor);

			intel_crtc_info(m, crtc);

			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
				   yesno(cursor->base.state->visible),
				   cursor->base.state->crtc_x,
				   cursor->base.state->crtc_y,
				   cursor->base.state->crtc_w,
				   cursor->base.state->crtc_h,
				   cursor->cursor.base);
			intel_scaler_info(m, crtc);
			intel_plane_info(m, crtc);
		}

		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
			   yesno(!crtc->cpu_fifo_underrun_disabled),
			   yesno(!crtc->pch_fifo_underrun_disabled));
		drm_modeset_unlock(&crtc->base.mutex);
	}

	seq_printf(m, "\n");
	seq_printf(m, "Connector info\n");
	seq_printf(m, "--------------\n");
	mutex_lock(&dev->mode_config.mutex);
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter)
		intel_connector_info(m, connector);
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
3083
/*
 * Dump per-engine execution state: GT wakefulness, global request count,
 * command streamer timestamp frequency, then the full engine dump for
 * every engine. Holds a wakeref so the engines can be inspected.
 */
static int i915_engine_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	enum intel_engine_id id;
	struct drm_printer p;

	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "GT awake? %s (epoch %u)\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
	seq_printf(m, "Global active requests: %d\n",
		   dev_priv->gt.active_requests);
	seq_printf(m, "CS timestamp frequency: %u kHz\n",
		   RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);

	p = drm_seq_file_printer(m);
	for_each_engine(engine, dev_priv, id)
		intel_engine_dump(engine, &p, "%s\n", engine->name);

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
3109
Lionel Landwerlin79e9cd52018-03-06 12:28:54 +00003110static int i915_rcs_topology(struct seq_file *m, void *unused)
3111{
3112 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3113 struct drm_printer p = drm_seq_file_printer(m);
3114
Jani Nikula02584042018-12-31 16:56:41 +02003115 intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
Lionel Landwerlin79e9cd52018-03-06 12:28:54 +00003116
3117 return 0;
3118}
3119
Chris Wilsonc5418a82017-10-13 21:26:19 +01003120static int i915_shrinker_info(struct seq_file *m, void *unused)
3121{
3122 struct drm_i915_private *i915 = node_to_i915(m->private);
3123
3124 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3125 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3126
3127 return 0;
3128}
3129
/*
 * Dump every shared DPLL: identity, which CRTCs reference/use it, and
 * the complete tracked hardware state including the Type-C MG PHY PLL
 * registers. All modeset locks are taken so the state cannot change
 * mid-dump.
 */
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i;

	drm_modeset_lock_all(dev);
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
			   pll->info->id);
		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
		seq_printf(m, " tracked hardware state:\n");
		seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
		seq_printf(m, " dpll_md: 0x%08x\n",
			   pll->state.hw_state.dpll_md);
		seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
		seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
		seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
		seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
		seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
		seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
			   pll->state.hw_state.mg_refclkin_ctl);
		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
			   pll->state.hw_state.mg_clktop2_coreclkctl1);
		seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
			   pll->state.hw_state.mg_clktop2_hsclkctl);
		seq_printf(m, " mg_pll_div0: 0x%08x\n",
			   pll->state.hw_state.mg_pll_div0);
		seq_printf(m, " mg_pll_div1: 0x%08x\n",
			   pll->state.hw_state.mg_pll_div1);
		seq_printf(m, " mg_pll_lf: 0x%08x\n",
			   pll->state.hw_state.mg_pll_lf);
		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
			   pll->state.hw_state.mg_pll_frac_lock);
		seq_printf(m, " mg_pll_ssc: 0x%08x\n",
			   pll->state.hw_state.mg_pll_ssc);
		seq_printf(m, " mg_pll_bias: 0x%08x\n",
			   pll->state.hw_state.mg_pll_bias);
		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
	}
	drm_modeset_unlock_all(dev);

	return 0;
}
3178
/*
 * List every context workaround applied to the render engine:
 * register offset, value written, and the mask of affected bits.
 */
static int i915_wa_registers(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	const struct i915_wa_list *wal = &i915->engine[RCS]->ctx_wa_list;
	struct i915_wa *wa;
	unsigned int i;

	seq_printf(m, "Workarounds applied: %u\n", wal->count);
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
			   i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);

	return 0;
}
3193
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05303194static int i915_ipc_status_show(struct seq_file *m, void *data)
3195{
3196 struct drm_i915_private *dev_priv = m->private;
3197
3198 seq_printf(m, "Isochronous Priority Control: %s\n",
3199 yesno(dev_priv->ipc_enabled));
3200 return 0;
3201}
3202
3203static int i915_ipc_status_open(struct inode *inode, struct file *file)
3204{
3205 struct drm_i915_private *dev_priv = inode->i_private;
3206
3207 if (!HAS_IPC(dev_priv))
3208 return -ENODEV;
3209
3210 return single_open(file, i915_ipc_status_show, dev_priv);
3211}
3212
/*
 * debugfs write handler toggling Isochronous Priority Control (IPC).
 * Parses a boolean from userspace; toggling IPC marks the BIOS-provided
 * watermarks as untrusted so they get recomputed on the next commit.
 */
static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	intel_wakeref_t wakeref;
	bool enable;
	int ret;

	ret = kstrtobool_from_user(ubuf, len, &enable);
	if (ret < 0)
		return ret;

	/* Hold a runtime-pm wakeref for the duration of the update. */
	with_intel_runtime_pm(dev_priv, wakeref) {
		if (!dev_priv->ipc_enabled && enable)
			DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
		/* Force fresh watermark computation instead of trusting BIOS. */
		dev_priv->wm.distrust_bios_wm = true;
		dev_priv->ipc_enabled = enable;
		intel_enable_ipc(dev_priv);
	}

	return len;
}
3236
/* debugfs file ops: read prints IPC status, write toggles it. */
static const struct file_operations i915_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = i915_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_ipc_status_write
};
3245
/*
 * Print the Display Data Buffer (DDB) allocation - start, end and size -
 * for every plane and the cursor on each pipe. Gen9+ only.
 */
static int i915_ddb_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct skl_ddb_entry *entry;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 9)
		return -ENODEV;

	/* Hold the modeset locks so the crtc states can't change under us. */
	drm_modeset_lock_all(dev);

	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		enum pipe pipe = crtc->pipe;
		enum plane_id plane_id;

		seq_printf(m, "Pipe %c\n", pipe_name(pipe));

		for_each_plane_id_on_crtc(crtc, plane_id) {
			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
			seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
				   entry->start, entry->end,
				   skl_ddb_entry_size(entry));
		}

		/* The cursor has its own DDB entry, printed last. */
		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
		seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
			   entry->end, skl_ddb_entry_size(entry));
	}

	drm_modeset_unlock_all(dev);

	return 0;
}
3284
/*
 * Print DRRS (Dynamic Refresh Rate Switching) status for one crtc: the
 * connectors routed to it, the VBT-advertised DRRS type, and - when the
 * current crtc state has DRRS - the idleness state and refresh rate.
 * NOTE: drrs->mutex is released on every exit path below; keep it that
 * way when modifying this function.
 */
static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Name every connector currently driven by this crtc. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);
	}
	drm_connector_list_iter_end(&conn_iter);

	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled\n");
			if (dev_priv->psr.enabled)
				seq_puts(m,
				"\tAs PSR is enabled, DRRS is not enabled\n");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
					drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			/* Unexpected state: report it and bail out early. */
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
						drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter*/
		seq_puts(m, "\tDRRS Supported : No");
	}
	seq_puts(m, "\n");
}
3359
3360static int i915_drrs_status(struct seq_file *m, void *unused)
3361{
David Weinehall36cdd012016-08-22 13:59:31 +03003362 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3363 struct drm_device *dev = &dev_priv->drm;
Vandana Kannana54746e2015-03-03 20:53:10 +05303364 struct intel_crtc *intel_crtc;
3365 int active_crtc_cnt = 0;
3366
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003367 drm_modeset_lock_all(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303368 for_each_intel_crtc(dev, intel_crtc) {
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003369 if (intel_crtc->base.state->active) {
Vandana Kannana54746e2015-03-03 20:53:10 +05303370 active_crtc_cnt++;
3371 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
3372
3373 drrs_status_per_crtc(m, dev, intel_crtc);
3374 }
Vandana Kannana54746e2015-03-03 20:53:10 +05303375 }
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003376 drm_modeset_unlock_all(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303377
3378 if (!active_crtc_cnt)
3379 seq_puts(m, "No active crtc found\n");
3380
3381 return 0;
3382}
3383
/*
 * Dump the DP MST topology of every DisplayPort source port that is
 * MST-capable.
 */
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *intel_encoder;
	struct intel_digital_port *intel_dig_port;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* Skip connectors without an encoder and MST branch devices. */
		intel_encoder = intel_attached_encoder(connector);
		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		if (!intel_dig_port->dp.can_mst)
			continue;

		seq_printf(m, "MST Source Port %c\n",
			   port_name(intel_dig_port->base.port));
		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
3414
/*
 * debugfs write handler arming DP compliance testing. Parses an integer
 * from userspace and applies it to every connected DP connector; only an
 * exact value of 1 arms the compliance code, anything else disarms it.
 */
static ssize_t i915_displayport_test_active_write(struct file *file,
						  const char __user *ubuf,
						  size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	if (len == 0)
		return 0;

	/* Copy the user buffer and NUL-terminate it for kstrtoint(). */
	input_buffer = memdup_user_nul(ubuf, len);
	if (IS_ERR(input_buffer))
		return PTR_ERR(input_buffer);

	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				break;
			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance.test_active = 1;
			else
				intel_dp->compliance.test_active = 0;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}
3473
3474static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3475{
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003476 struct drm_i915_private *dev_priv = m->private;
3477 struct drm_device *dev = &dev_priv->drm;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003478 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003479 struct drm_connector_list_iter conn_iter;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003480 struct intel_dp *intel_dp;
3481
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003482 drm_connector_list_iter_begin(dev, &conn_iter);
3483 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003484 struct intel_encoder *encoder;
3485
Todd Previteeb3394fa2015-04-18 00:04:19 -07003486 if (connector->connector_type !=
3487 DRM_MODE_CONNECTOR_DisplayPort)
3488 continue;
3489
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003490 encoder = to_intel_encoder(connector->encoder);
3491 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3492 continue;
3493
3494 if (encoder && connector->status == connector_status_connected) {
3495 intel_dp = enc_to_intel_dp(&encoder->base);
Manasi Navarec1617ab2016-12-09 16:22:50 -08003496 if (intel_dp->compliance.test_active)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003497 seq_puts(m, "1");
3498 else
3499 seq_puts(m, "0");
3500 } else
3501 seq_puts(m, "0");
3502 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003503 drm_connector_list_iter_end(&conn_iter);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003504
3505 return 0;
3506}
3507
3508static int i915_displayport_test_active_open(struct inode *inode,
David Weinehall36cdd012016-08-22 13:59:31 +03003509 struct file *file)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003510{
David Weinehall36cdd012016-08-22 13:59:31 +03003511 return single_open(file, i915_displayport_test_active_show,
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003512 inode->i_private);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003513}
3514
/* debugfs file ops: read reports, write arms/disarms DP compliance tests. */
static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};
3523
/*
 * debugfs read handler for DP compliance test data. For each connected DP
 * connector, prints the EDID checksum or the video-pattern parameters
 * depending on the pending compliance test type; "0" otherwise.
 */
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			if (intel_dp->compliance.test_type ==
			    DP_TEST_LINK_EDID_READ)
				seq_printf(m, "%lx",
					   intel_dp->compliance.test_data.edid);
			else if (intel_dp->compliance.test_type ==
				 DP_TEST_LINK_VIDEO_PATTERN) {
				seq_printf(m, "hdisplay: %d\n",
					   intel_dp->compliance.test_data.hdisplay);
				seq_printf(m, "vdisplay: %d\n",
					   intel_dp->compliance.test_data.vdisplay);
				seq_printf(m, "bpc: %u\n",
					   intel_dp->compliance.test_data.bpc);
			}
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003567
3568static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3569{
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003570 struct drm_i915_private *dev_priv = m->private;
3571 struct drm_device *dev = &dev_priv->drm;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003572 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003573 struct drm_connector_list_iter conn_iter;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003574 struct intel_dp *intel_dp;
3575
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003576 drm_connector_list_iter_begin(dev, &conn_iter);
3577 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003578 struct intel_encoder *encoder;
3579
Todd Previteeb3394fa2015-04-18 00:04:19 -07003580 if (connector->connector_type !=
3581 DRM_MODE_CONNECTOR_DisplayPort)
3582 continue;
3583
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003584 encoder = to_intel_encoder(connector->encoder);
3585 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3586 continue;
3587
3588 if (encoder && connector->status == connector_status_connected) {
3589 intel_dp = enc_to_intel_dp(&encoder->base);
Manasi Navarec1617ab2016-12-09 16:22:50 -08003590 seq_printf(m, "%02lx", intel_dp->compliance.test_type);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003591 } else
3592 seq_puts(m, "0");
3593 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003594 drm_connector_list_iter_end(&conn_iter);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003595
3596 return 0;
3597}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003598DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003599
/*
 * Print a watermark latency table: one "WMn <raw> (<usec>)" line per
 * watermark level the platform supports, converting the raw value to
 * microseconds per the platform's units (see comment in the loop).
 */
static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	int level;
	int num_levels;

	/* Number of watermark levels varies by platform. */
	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9/vlv/chv
		 */
		if (INTEL_GEN(dev_priv) >= 9 ||
		    IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv) ||
		    IS_G4X(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		/* 'latency' is now in tenths of a usec. */
		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}
3639
3640static int pri_wm_latency_show(struct seq_file *m, void *data)
3641{
David Weinehall36cdd012016-08-22 13:59:31 +03003642 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003643 const u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003644
David Weinehall36cdd012016-08-22 13:59:31 +03003645 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003646 latencies = dev_priv->wm.skl_latency;
3647 else
David Weinehall36cdd012016-08-22 13:59:31 +03003648 latencies = dev_priv->wm.pri_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003649
3650 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003651
3652 return 0;
3653}
3654
3655static int spr_wm_latency_show(struct seq_file *m, void *data)
3656{
David Weinehall36cdd012016-08-22 13:59:31 +03003657 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003658 const u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003659
David Weinehall36cdd012016-08-22 13:59:31 +03003660 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003661 latencies = dev_priv->wm.skl_latency;
3662 else
David Weinehall36cdd012016-08-22 13:59:31 +03003663 latencies = dev_priv->wm.spr_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003664
3665 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003666
3667 return 0;
3668}
3669
3670static int cur_wm_latency_show(struct seq_file *m, void *data)
3671{
David Weinehall36cdd012016-08-22 13:59:31 +03003672 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003673 const u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003674
David Weinehall36cdd012016-08-22 13:59:31 +03003675 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003676 latencies = dev_priv->wm.skl_latency;
3677 else
David Weinehall36cdd012016-08-22 13:59:31 +03003678 latencies = dev_priv->wm.cur_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003679
3680 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003681
3682 return 0;
3683}
3684
3685static int pri_wm_latency_open(struct inode *inode, struct file *file)
3686{
David Weinehall36cdd012016-08-22 13:59:31 +03003687 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003688
Ville Syrjälä04548cb2017-04-21 21:14:29 +03003689 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003690 return -ENODEV;
3691
David Weinehall36cdd012016-08-22 13:59:31 +03003692 return single_open(file, pri_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003693}
3694
3695static int spr_wm_latency_open(struct inode *inode, struct file *file)
3696{
David Weinehall36cdd012016-08-22 13:59:31 +03003697 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003698
Rodrigo Vivib2ae3182019-02-04 14:25:38 -08003699 if (HAS_GMCH(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003700 return -ENODEV;
3701
David Weinehall36cdd012016-08-22 13:59:31 +03003702 return single_open(file, spr_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003703}
3704
3705static int cur_wm_latency_open(struct inode *inode, struct file *file)
3706{
David Weinehall36cdd012016-08-22 13:59:31 +03003707 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003708
Rodrigo Vivib2ae3182019-02-04 14:25:38 -08003709 if (HAS_GMCH(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003710 return -ENODEV;
3711
David Weinehall36cdd012016-08-22 13:59:31 +03003712 return single_open(file, cur_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003713}
3714
/*
 * Common debugfs write handler for the watermark latency files. Parses up
 * to 8 space-separated u16 values from userspace into 'wm' under the
 * modeset locks; the count parsed must exactly match the platform's
 * number of watermark levels, otherwise -EINVAL.
 */
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, u16 wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	u16 new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	/* Must mirror the per-platform level count in wm_latency_show(). */
	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	/* len < sizeof(tmp), so the terminator always fits. */
	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
3759
3760
3761static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3762 size_t len, loff_t *offp)
3763{
3764 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003765 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003766 u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003767
David Weinehall36cdd012016-08-22 13:59:31 +03003768 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003769 latencies = dev_priv->wm.skl_latency;
3770 else
David Weinehall36cdd012016-08-22 13:59:31 +03003771 latencies = dev_priv->wm.pri_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003772
3773 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003774}
3775
3776static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3777 size_t len, loff_t *offp)
3778{
3779 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003780 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003781 u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003782
David Weinehall36cdd012016-08-22 13:59:31 +03003783 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003784 latencies = dev_priv->wm.skl_latency;
3785 else
David Weinehall36cdd012016-08-22 13:59:31 +03003786 latencies = dev_priv->wm.spr_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003787
3788 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003789}
3790
3791static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3792 size_t len, loff_t *offp)
3793{
3794 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003795 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003796 u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003797
David Weinehall36cdd012016-08-22 13:59:31 +03003798 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003799 latencies = dev_priv->wm.skl_latency;
3800 else
David Weinehall36cdd012016-08-22 13:59:31 +03003801 latencies = dev_priv->wm.cur_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003802
3803 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003804}
3805
/* debugfs file ops for the primary watermark latency table. */
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

/* debugfs file ops for the sprite watermark latency table. */
static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

/* debugfs file ops for the cursor watermark latency table. */
static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
3832
Kees Cook647416f2013-03-10 14:10:06 -07003833static int
3834i915_wedged_get(void *data, u64 *val)
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003835{
Chris Wilsonc41166f2019-02-20 14:56:37 +00003836 int ret = i915_terminally_wedged(data);
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003837
Chris Wilsonc41166f2019-02-20 14:56:37 +00003838 switch (ret) {
3839 case -EIO:
3840 *val = 1;
3841 return 0;
3842 case 0:
3843 *val = 0;
3844 return 0;
3845 default:
3846 return ret;
3847 }
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003848}
3849
/*
 * i915_wedged write side: manually trigger error handling for the engine
 * mask written by the user. Waits for any in-flight reset to finish first.
 */
static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/* Flush any previous reset before applying for a new one */
	wait_event(i915->gpu_error.reset_queue,
		   !test_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags));

	i915_handle_error(i915, val, I915_ERROR_CAPTURE,
			  "Manually set wedged engine mask = %llx", val);
	return 0;
}
3863
/* i915_wedged debugfs attribute: read wedged state, write to force errors. */
DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003867
/* Bitmask flags accepted by the i915_drop_caches debugfs interface. */
#define DROP_UNBOUND	BIT(0)
#define DROP_BOUND	BIT(1)
#define DROP_RETIRE	BIT(2)
#define DROP_ACTIVE	BIT(3)
#define DROP_FREED	BIT(4)
#define DROP_SHRINK_ALL	BIT(5)
#define DROP_IDLE	BIT(6)
#define DROP_RESET_ACTIVE	BIT(7)
#define DROP_RESET_SEQNO	BIT(8)
/* Union of every supported flag; reported by i915_drop_caches_get(). */
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED	| \
		  DROP_SHRINK_ALL |\
		  DROP_IDLE	| \
		  DROP_RESET_ACTIVE | \
		  DROP_RESET_SEQNO)
Kees Cook647416f2013-03-10 14:10:06 -07003886static int
3887i915_drop_caches_get(void *data, u64 *val)
Chris Wilsondd624af2013-01-15 12:39:35 +00003888{
Kees Cook647416f2013-03-10 14:10:06 -07003889 *val = DROP_ALL;
Chris Wilsondd624af2013-01-15 12:39:35 +00003890
Kees Cook647416f2013-03-10 14:10:06 -07003891 return 0;
Chris Wilsondd624af2013-01-15 12:39:35 +00003892}
3893
/*
 * debugfs i915_drop_caches write handler: drop the GEM caches/state stages
 * selected by the DROP_* bits in @val.  Stages run in a fixed order:
 * optional wedge on stuck engines, request wait/retire under struct_mutex,
 * optional reset, shrinking, idling, and finally draining freed objects.
 */
static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	intel_wakeref_t wakeref;
	int ret = 0;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);
	/* Hold the device awake for the whole sequence; released at out:. */
	wakeref = intel_runtime_pm_get(i915);

	/* If asked to reset active work but the engines never idle, wedge. */
	if (val & DROP_RESET_ACTIVE &&
	    wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT))
		i915_gem_set_wedged(i915);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
		ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
		if (ret)
			goto out;

		if (val & DROP_ACTIVE)
			ret = i915_gem_wait_for_idle(i915,
						     I915_WAIT_INTERRUPTIBLE |
						     I915_WAIT_LOCKED,
						     MAX_SCHEDULE_TIMEOUT);

		if (val & DROP_RETIRE)
			i915_retire_requests(i915);

		mutex_unlock(&i915->drm.struct_mutex);
	}

	/* Kick off a fresh reset if the device ended up terminally wedged. */
	if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(i915))
		i915_handle_error(i915, ALL_ENGINES, 0, NULL);

	/* Annotate the shrinker calls for lockdep's reclaim tracking. */
	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(i915);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_IDLE) {
		/* Flush retire/idle workers until the GT reports !awake. */
		do {
			if (READ_ONCE(i915->gt.active_requests))
				flush_delayed_work(&i915->gt.retire_work);
			drain_delayed_work(&i915->gt.idle_work);
		} while (READ_ONCE(i915->gt.awake));
	}

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(i915);

out:
	intel_runtime_pm_put(i915, wakeref);

	return ret;
}
3958
/* i915_drop_caches: read reports DROP_ALL, write drops the selected caches. */
DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
Chris Wilsondd624af2013-01-15 12:39:35 +00003962
/*
 * debugfs i915_cache_sharing read handler: report the current MBC snoop
 * control policy (gen6/gen7 only) extracted from GEN6_MBCUNIT_SNPCR.
 */
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;
	u32 snpcr = 0;

	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
		return -ENODEV;

	/* Register read requires the device awake for its duration. */
	with_intel_runtime_pm(dev_priv, wakeref)
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}
3980
/*
 * debugfs i915_cache_sharing write handler: set the MBC snoop control
 * policy (gen6/gen7 only).  Valid values are 0-3.
 */
static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;

	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
	with_intel_runtime_pm(dev_priv, wakeref) {
		u32 snpcr;

		/* Update the cache sharing policy here as well */
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
		snpcr &= ~GEN6_MBC_SNPCR_MASK;
		snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
		I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
	}

	return 0;
}
4006
/* i915_cache_sharing: get/set the gen6/7 MBC snoop control policy (0-3). */
DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07004010
/*
 * Fill @sseu with the live slice/subslice/EU power-gating status on
 * Cherryview, decoded from the CHV_POWER_SS*_SIG1/SIG2 registers.
 * CHV has a single slice with up to two subslices.
 */
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
#define SS_MAX 2
	const int ss_max = SS_MAX;
	u32 sig1[SS_MAX], sig2[SS_MAX];
	int ss;

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] |= BIT(ss);
		/* Each EU*_PG_ENABLE bit gates a pair of EUs. */
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
#undef SS_MAX
}
4043
/*
 * Fill @sseu with the live slice/subslice/EU power-gating status for
 * gen10+ parts, decoded from the per-slice PGCTL ACK registers.
 */
static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		/*
		 * FIXME: Valid SS Mask respects the spec and read
		 * only valid bits for those registers, excluding reserved
		 * although this seems wrong because it would leave many
		 * subslices without ACK.
		 */
		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
	}

	/* ACK bit layouts are shared with gen9, hence the GEN9_ masks. */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		sseu->subslice_mask[s] = info->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			/* Each set ACK bit accounts for a pair of EUs. */
			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4099
/*
 * Fill @sseu with the live slice/subslice/EU power-gating status for gen9
 * parts, decoded from the per-slice PGCTL ACK registers.  On GEN9_LP the
 * subslice mask is discovered from the ACK bits; on GEN9_BC it is taken
 * from the static runtime info.
 */
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		if (IS_GEN9_BC(dev_priv))
			sseu->subslice_mask[s] =
				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask[s] |= BIT(ss);
			}

			/* Each set ACK bit accounts for a pair of EUs. */
			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4155
/*
 * Fill @sseu with the live slice status on Broadwell.  Only the slice mask
 * is read from hardware (GEN8_GT_SLICE_INFO); subslice and EU counts come
 * from the static runtime info, minus fused-off 7-EU subslices.
 */
static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
					 struct sseu_dev_info *sseu)
{
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->eu_per_subslice =
			RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			sseu->subslice_mask[s] =
				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
		}
		sseu->eu_total = sseu->eu_per_subslice *
				 sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu =
				RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}
4183
/*
 * Print one SSEU summary to the seq_file.  @is_available_info selects the
 * "Available" (static capability) vs "Enabled" (live status) labelling;
 * the capability-only fields (pooled EU, power-gating flags) are printed
 * only for the "Available" variant.
 */
static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
				 const struct sseu_dev_info *sseu)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const char *type = is_available_info ? "Available" : "Enabled";
	int s;

	seq_printf(m, "  %s Slice Mask: %04x\n", type,
		   sseu->slice_mask);
	seq_printf(m, "  %s Slice Total: %u\n", type,
		   hweight8(sseu->slice_mask));
	seq_printf(m, "  %s Subslice Total: %u\n", type,
		   sseu_subslice_total(sseu));
	for (s = 0; s < fls(sseu->slice_mask); s++) {
		seq_printf(m, "  %s Slice%i subslices: %u\n", type,
			   s, hweight8(sseu->subslice_mask[s]));
	}
	seq_printf(m, "  %s EU Total: %u\n", type,
		   sseu->eu_total);
	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
		   sseu->eu_per_subslice);

	if (!is_available_info)
		return;

	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
	if (HAS_POOLED_EU(dev_priv))
		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);

	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(sseu->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(sseu->has_eu_pg));
}
4220
/*
 * debugfs i915_sseu_status: print the static SSEU capabilities followed by
 * the live device status, read under runtime-pm via the per-platform
 * *_sseu_device_status() helper.  Gen8+ only.
 */
static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct sseu_dev_info sseu;
	intel_wakeref_t wakeref;

	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);

	seq_puts(m, "SSEU Device Status\n");
	/* Start from a clean slate; only the max_* limits are pre-seeded. */
	memset(&sseu, 0, sizeof(sseu));
	sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
	sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
	sseu.max_eus_per_subslice =
		RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;

	with_intel_runtime_pm(dev_priv, wakeref) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_sseu_device_status(dev_priv, &sseu);
		else if (IS_BROADWELL(dev_priv))
			broadwell_sseu_device_status(dev_priv, &sseu);
		else if (IS_GEN(dev_priv, 9))
			gen9_sseu_device_status(dev_priv, &sseu);
		else if (INTEL_GEN(dev_priv) >= 10)
			gen10_sseu_device_status(dev_priv, &sseu);
	}

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}
4255
Ben Widawsky6d794d42011-04-25 11:25:56 -07004256static int i915_forcewake_open(struct inode *inode, struct file *file)
4257{
Chris Wilsond7a133d2017-09-07 14:44:41 +01004258 struct drm_i915_private *i915 = inode->i_private;
Ben Widawsky6d794d42011-04-25 11:25:56 -07004259
Chris Wilsond7a133d2017-09-07 14:44:41 +01004260 if (INTEL_GEN(i915) < 6)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004261 return 0;
4262
Tvrtko Ursulin6ddbb12e2019-01-17 14:48:31 +00004263 file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915);
Chris Wilsond7a133d2017-09-07 14:44:41 +01004264 intel_uncore_forcewake_user_get(i915);
Ben Widawsky6d794d42011-04-25 11:25:56 -07004265
4266 return 0;
4267}
4268
Ben Widawskyc43b5632012-04-16 14:07:40 -07004269static int i915_forcewake_release(struct inode *inode, struct file *file)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004270{
Chris Wilsond7a133d2017-09-07 14:44:41 +01004271 struct drm_i915_private *i915 = inode->i_private;
Ben Widawsky6d794d42011-04-25 11:25:56 -07004272
Chris Wilsond7a133d2017-09-07 14:44:41 +01004273 if (INTEL_GEN(i915) < 6)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004274 return 0;
4275
Chris Wilsond7a133d2017-09-07 14:44:41 +01004276 intel_uncore_forcewake_user_put(i915);
Tvrtko Ursulin6ddbb12e2019-01-17 14:48:31 +00004277 intel_runtime_pm_put(i915,
4278 (intel_wakeref_t)(uintptr_t)file->private_data);
Ben Widawsky6d794d42011-04-25 11:25:56 -07004279
4280 return 0;
4281}
4282
/* Holding this file open keeps forcewake (and the device) awake. */
static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
4288
/*
 * debugfs i915_hpd_storm_ctl read: report the HPD storm threshold and
 * whether a storm is currently detected (reenable work pending).
 */
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;

	/* Synchronize with everything first in case there's been an HPD
	 * storm, but we haven't finished handling it in the kernel yet
	 */
	synchronize_irq(dev_priv->drm.irq);
	flush_work(&dev_priv->hotplug.dig_port_work);
	flush_work(&dev_priv->hotplug.hotplug_work);

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   yesno(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}
4307
/*
 * debugfs i915_hpd_storm_ctl write: set the HPD storm detection threshold.
 * Accepts a decimal count, or "reset" to restore the default; a value of 0
 * disables storm detection.  The per-pin counters are cleared under
 * irq_lock so the new threshold starts from a clean state.
 */
static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
			      new_threshold);
	else
		DRM_DEBUG_KMS("Disabling HPD storm detection\n");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}
4356
/* Bind the seq_file show routine to this device's private data. */
static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}
4361
/* debugfs i915_hpd_storm_ctl: read status, write a new storm threshold. */
static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
4370
Lyude Paul9a64c652018-11-06 16:30:16 -05004371static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
4372{
4373 struct drm_i915_private *dev_priv = m->private;
4374
4375 seq_printf(m, "Enabled: %s\n",
4376 yesno(dev_priv->hotplug.hpd_short_storm_enabled));
4377
4378 return 0;
4379}
4380
/* Bind the seq_file show routine to this device's private data. */
static int
i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_short_storm_ctl_show,
			   inode->i_private);
}
4387
/*
 * debugfs i915_hpd_short_storm_ctl write: enable/disable short-pulse HPD
 * storm detection.  Accepts a boolean, or "reset" to restore the platform
 * default (enabled only when the platform lacks DP-MST).  Per-pin counters
 * are cleared under irq_lock.
 */
static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
					      const char __user *ubuf,
					      size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	char *newline;
	char tmp[16];
	int i;
	bool new_state;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	/* Reset to the "default" state for this system */
	if (strcmp(tmp, "reset") == 0)
		new_state = !HAS_DP_MST(dev_priv);
	else if (kstrtobool(tmp, &new_state) != 0)
		return -EINVAL;

	DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
		      new_state ? "En" : "Dis");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_short_storm_enabled = new_state;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}
4434
/* debugfs i915_hpd_short_storm_ctl: read status, write enable/disable. */
static const struct file_operations i915_hpd_short_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_short_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_short_storm_ctl_write,
};
4443
/*
 * debugfs i915_drrs_ctl write: manually enable (@val != 0) or disable DRRS
 * on every active CRTC that supports it.  Gen7+ only.  Each CRTC is
 * handled under its own modeset lock; any pending commit is flushed
 * (hw_done) before touching DRRS, and only eDP outputs are affected.
 */
static int i915_drrs_ctl_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 7)
		return -ENODEV;

	for_each_intel_crtc(dev, crtc) {
		struct drm_connector_list_iter conn_iter;
		struct intel_crtc_state *crtc_state;
		struct drm_connector *connector;
		struct drm_crtc_commit *commit;
		int ret;

		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		/* Nothing to do on inactive or non-DRRS CRTCs. */
		if (!crtc_state->base.active ||
		    !crtc_state->has_drrs)
			goto out;

		/* Wait for any in-flight commit to reach the hardware. */
		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				goto out;
		}

		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct intel_encoder *encoder;
			struct intel_dp *intel_dp;

			/* Only connectors driven by this CRTC. */
			if (!(crtc_state->base.connector_mask &
			      drm_connector_mask(connector)))
				continue;

			encoder = intel_attached_encoder(connector);
			if (encoder->type != INTEL_OUTPUT_EDP)
				continue;

			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
						val ? "en" : "dis", val);

			intel_dp = enc_to_intel_dp(&encoder->base);
			if (val)
				intel_edp_drrs_enable(intel_dp,
						      crtc_state);
			else
				intel_edp_drrs_disable(intel_dp,
						       crtc_state);
		}
		drm_connector_list_iter_end(&conn_iter);

out:
		/* Per-CRTC unlock; bail out on the first error. */
		drm_modeset_unlock(&crtc->base.mutex);
		if (ret)
			return ret;
	}

	return 0;
}
4511
/* i915_drrs_ctl: write-only control to force DRRS on/off (no read). */
DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4513
/*
 * debugfs i915_fifo_underrun_reset write: writing a truthy value re-arms
 * FIFO underrun reporting on every active CRTC (after flushing any pending
 * commit) and resets FBC underrun tracking.  Writing a falsy value is a
 * no-op that still consumes the input.
 */
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		/* Let any in-flight commit fully complete first. */
		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->base.active) {
			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
				      pipe_name(intel_crtc->pipe));

			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}

		drm_modeset_unlock(&intel_crtc->base.mutex);

		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}
4567
/* debugfs i915_fifo_underrun_reset: write-only re-arm control. */
static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};
4574
/*
 * Read-only debugfs entries. Each entry names a file under the DRM minor's
 * debugfs root and the seq_file show callback that renders it; the whole
 * table is registered in one go by drm_debugfs_create_files() from
 * i915_debugfs_register(). The optional fourth field is passed to the
 * callback as driver-private data (used by i915_guc_load_err_log_dump to
 * select the error log).
 */
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_reset_info", i915_reset_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
Ben Gamari20172632009-02-17 20:08:50 -05004624
/*
 * Writable debugfs control files. Unlike i915_debugfs_list above, each of
 * these supplies its own file_operations (typically with a .write handler);
 * they are created one by one in i915_debugfs_register(). The error-capture
 * files are compiled out when CONFIG_DRM_I915_CAPTURE_ERROR is disabled.
 */
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};
4652
Chris Wilson1dac8912016-06-24 14:00:17 +01004653int i915_debugfs_register(struct drm_i915_private *dev_priv)
Ben Gamari20172632009-02-17 20:08:50 -05004654{
Chris Wilson91c8a322016-07-05 10:40:23 +01004655 struct drm_minor *minor = dev_priv->drm.primary;
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004656 struct dentry *ent;
Maarten Lankhorst6cc42152018-06-28 09:23:02 +02004657 int i;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004658
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004659 ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4660 minor->debugfs_root, to_i915(minor->dev),
4661 &i915_forcewake_fops);
4662 if (!ent)
4663 return -ENOMEM;
Daniel Vetter6a9c3082011-12-14 13:57:11 +01004664
Daniel Vetter34b96742013-07-04 20:49:44 +02004665 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004666 ent = debugfs_create_file(i915_debugfs_files[i].name,
4667 S_IRUGO | S_IWUSR,
4668 minor->debugfs_root,
4669 to_i915(minor->dev),
Daniel Vetter34b96742013-07-04 20:49:44 +02004670 i915_debugfs_files[i].fops);
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004671 if (!ent)
4672 return -ENOMEM;
Daniel Vetter34b96742013-07-04 20:49:44 +02004673 }
Mika Kuoppala40633212012-12-04 15:12:00 +02004674
Ben Gamari27c202a2009-07-01 22:26:52 -04004675 return drm_debugfs_create_files(i915_debugfs_list,
4676 I915_DEBUGFS_ENTRIES,
Ben Gamari20172632009-02-17 20:08:50 -05004677 minor->debugfs_root, minor);
4678}
4679
/*
 * Describes one contiguous range of DPCD registers to be dumped by the
 * i915_dpcd debugfs file. The range is either [offset, end] inclusive,
 * or [offset, offset + size) when .end is unset.
 */
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};
4690
/*
 * DPCD register ranges dumped by i915_dpcd_show(). Entries marked .edp
 * are read only for eDP connectors. Each range must fit in the 16-byte
 * read buffer used by i915_dpcd_show() (enforced there with a WARN_ON).
 */
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
4703
4704static int i915_dpcd_show(struct seq_file *m, void *data)
4705{
4706 struct drm_connector *connector = m->private;
4707 struct intel_dp *intel_dp =
4708 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
Jani Nikulae5315212019-01-16 11:15:23 +02004709 u8 buf[16];
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004710 ssize_t err;
4711 int i;
4712
Mika Kuoppala5c1a8872015-05-15 13:09:21 +03004713 if (connector->status != connector_status_connected)
4714 return -ENODEV;
4715
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004716 for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4717 const struct dpcd_block *b = &i915_dpcd_debug[i];
4718 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4719
4720 if (b->edp &&
4721 connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4722 continue;
4723
4724 /* low tech for now */
4725 if (WARN_ON(size > sizeof(buf)))
4726 continue;
4727
4728 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
Chris Wilson65404c82018-10-10 09:17:06 +01004729 if (err < 0)
4730 seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
4731 else
4732 seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
kbuild test robotb3f9d7d2015-04-16 18:34:06 +08004733 }
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004734
4735 return 0;
4736}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02004737DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004738
David Weinehallecbd6782016-08-23 12:23:56 +03004739static int i915_panel_show(struct seq_file *m, void *data)
4740{
4741 struct drm_connector *connector = m->private;
4742 struct intel_dp *intel_dp =
4743 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4744
4745 if (connector->status != connector_status_connected)
4746 return -ENODEV;
4747
4748 seq_printf(m, "Panel power up delay: %d\n",
4749 intel_dp->panel_power_up_delay);
4750 seq_printf(m, "Panel power down delay: %d\n",
4751 intel_dp->panel_power_down_delay);
4752 seq_printf(m, "Backlight on delay: %d\n",
4753 intel_dp->backlight_on_delay);
4754 seq_printf(m, "Backlight off delay: %d\n",
4755 intel_dp->backlight_off_delay);
4756
4757 return 0;
4758}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02004759DEFINE_SHOW_ATTRIBUTE(i915_panel);
David Weinehallecbd6782016-08-23 12:23:56 +03004760
Ramalingam Cbdc93fe2018-10-23 14:52:29 +05304761static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
4762{
4763 struct drm_connector *connector = m->private;
4764 struct intel_connector *intel_connector = to_intel_connector(connector);
4765
4766 if (connector->status != connector_status_connected)
4767 return -ENODEV;
4768
4769 /* HDCP is supported by connector */
Ramalingam Cd3dacc72018-10-29 15:15:46 +05304770 if (!intel_connector->hdcp.shim)
Ramalingam Cbdc93fe2018-10-23 14:52:29 +05304771 return -EINVAL;
4772
4773 seq_printf(m, "%s:%d HDCP version: ", connector->name,
4774 connector->base.id);
4775 seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
4776 "None" : "HDCP1.4");
4777 seq_puts(m, "\n");
4778
4779 return 0;
4780}
4781DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
4782
/*
 * seq_file show callback for "i915_dsc_fec_support": report DSC enable
 * state for the connector's current CRTC plus DSC/FEC sink capability.
 *
 * Takes connection_mutex and the CRTC lock under an interruptible
 * modeset acquire context, retrying via drm_modeset_backoff() on
 * -EDEADLK. Returns 0 on success, -EINTR if interrupted while locking,
 * or -ENODEV if the connector is disconnected or has no CRTC.
 */
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret = 0;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		try_again = false;
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			/* NOTE(review): any lock error (including -EDEADLK)
			 * is reported as -EINTR here instead of retrying via
			 * backoff — confirm this is intentional. */
			ret = -EINTR;
			break;
		}
		crtc = connector->state->crtc;
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			/* Deadlock against another context: drop all locks
			 * and retry the whole sequence from the top. */
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   yesno(crtc_state->dsc_params.compression_enable));
		seq_printf(m, "DSC_Sink_Support: %s\n",
			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		/* FEC support is only relevant for external DP, not eDP. */
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
4836
4837static ssize_t i915_dsc_fec_support_write(struct file *file,
4838 const char __user *ubuf,
4839 size_t len, loff_t *offp)
4840{
4841 bool dsc_enable = false;
4842 int ret;
4843 struct drm_connector *connector =
4844 ((struct seq_file *)file->private_data)->private;
4845 struct intel_encoder *encoder = intel_attached_encoder(connector);
4846 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4847
4848 if (len == 0)
4849 return 0;
4850
4851 DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
4852 len);
4853
4854 ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
4855 if (ret < 0)
4856 return ret;
4857
4858 DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
4859 (dsc_enable) ? "true" : "false");
4860 intel_dp->force_dsc_en = dsc_enable;
4861
4862 *offp += len;
4863 return len;
4864}
4865
4866static int i915_dsc_fec_support_open(struct inode *inode,
4867 struct file *file)
4868{
4869 return single_open(file, i915_dsc_fec_support_show,
4870 inode->i_private);
4871}
4872
/*
 * Read/write debugfs file: reads go through the seq_file show callback,
 * writes through i915_dsc_fec_support_write() to force DSC enable.
 */
static const struct file_operations i915_dsc_fec_support_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_fec_support_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_fec_support_write
};
4881
/**
 * i915_debugfs_connector_add - add i915 specific connector debugfs files
 * @connector: pointer to a registered drm_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 *
 * Returns 0 on success, negative error codes on error.
 */
int i915_debugfs_connector_add(struct drm_connector *connector)
{
	struct dentry *root = connector->debugfs_entry;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);

	/* The connector must have been registered beforehand. */
	if (!root)
		return -ENODEV;

	/* DPCD register dump for DP and eDP sinks. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_dpcd", S_IRUGO, root,
				    connector, &i915_dpcd_fops);

	/* Panel power/backlight timings and PSR sink status, eDP only. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
				    connector, &i915_panel_fops);
		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
				    connector, &i915_psr_sink_status_fops);
	}

	/* Sink HDCP capability report for DP and HDMI connectors. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
				    connector, &i915_hdcp_sink_capability_fops);
	}

	/* DSC/FEC status and force-enable knob; gated to gen10+ hardware. */
	if (INTEL_GEN(dev_priv) >= 10 &&
	    (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
				    connector, &i915_dsc_fec_support_fops);

	return 0;
}