blob: 0e4dffcd4da41689976313e3995851457a691820 [file] [log] [blame]
Ben Gamari20172632009-02-17 20:08:50 -05001/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 *
27 */
28
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +010029#include <linux/sched/mm.h>
Jani Nikula98afa312019-04-05 14:00:08 +030030#include <linux/sort.h>
31
Daniel Vetterfcd70cd2019-01-17 22:03:34 +010032#include <drm/drm_debugfs.h>
33#include <drm/drm_fourcc.h>
Ben Gamari20172632009-02-17 20:08:50 -050034
Chris Wilson112ed2d2019-04-24 18:48:39 +010035#include "gt/intel_reset.h"
36
Chris Wilson02684442019-04-26 17:33:35 +010037#include "i915_gem_context.h"
Jani Nikula440e2b32019-04-29 15:29:27 +030038#include "i915_irq.h"
Jani Nikula27fec1f2019-04-05 14:00:17 +030039#include "intel_dp.h"
Jani Nikula98afa312019-04-05 14:00:08 +030040#include "intel_drv.h"
41#include "intel_fbc.h"
42#include "intel_guc_submission.h"
Jani Nikula408bd912019-04-05 14:00:13 +030043#include "intel_hdcp.h"
Jani Nikula05506912019-04-05 14:00:18 +030044#include "intel_hdmi.h"
Jani Nikula696173b2019-04-05 14:00:15 +030045#include "intel_pm.h"
Jani Nikula55367a22019-04-05 14:00:09 +030046#include "intel_psr.h"
Chris Wilson56c50982019-04-26 09:17:22 +010047#include "intel_sideband.h"
Chris Wilson9f588922019-01-16 15:33:04 +000048
/* Resolve the i915 device private from a debugfs node's owning DRM device. */
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}
53
/*
 * debugfs: dump device generation, platform, PCH type, the device-info
 * flag/runtime tables, driver caps, and the current module parameters.
 * Always returns 0 (seq_file show callback).
 */
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

	intel_device_info_dump_flags(info, &p);
	intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
	intel_driver_caps_print(&dev_priv->caps, &p);

	/* Lock out concurrent sysfs writes while dumping modparams. */
	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915_modparams, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}
Ben Gamari433e12f2009-02-17 20:08:51 -050074
/* One-character status flag: '*' while the object is active on the GPU. */
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_is_active(obj))
		return '*';

	return ' ';
}
79
Imre Deaka7363de2016-05-12 16:18:52 +030080static char get_pin_flag(struct drm_i915_gem_object *obj)
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010081{
Chris Wilsonbd3d2252017-10-13 21:26:14 +010082 return obj->pin_global ? 'p' : ' ';
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010083}
84
Imre Deaka7363de2016-05-12 16:18:52 +030085static char get_tiling_flag(struct drm_i915_gem_object *obj)
Chris Wilsona6172a82009-02-11 14:26:38 +000086{
Chris Wilson3e510a82016-08-05 10:14:23 +010087 switch (i915_gem_object_get_tiling(obj)) {
Akshay Joshi0206e352011-08-16 15:34:10 -040088 default:
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010089 case I915_TILING_NONE: return ' ';
90 case I915_TILING_X: return 'X';
91 case I915_TILING_Y: return 'Y';
Akshay Joshi0206e352011-08-16 15:34:10 -040092 }
Chris Wilsona6172a82009-02-11 14:26:38 +000093}
94
Imre Deaka7363de2016-05-12 16:18:52 +030095static char get_global_flag(struct drm_i915_gem_object *obj)
Ben Widawsky1d693bc2013-07-31 17:00:00 -070096{
Chris Wilsona65adaf2017-10-09 09:43:57 +010097 return obj->userfault_count ? 'g' : ' ';
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010098}
99
Imre Deaka7363de2016-05-12 16:18:52 +0300100static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +0100101{
Chris Wilsona4f5ea62016-10-28 13:58:35 +0100102 return obj->mm.mapping ? 'M' : ' ';
Ben Widawsky1d693bc2013-07-31 17:00:00 -0700103}
104
/*
 * Sum the node sizes of all GGTT VMAs of @obj that actually have a
 * drm_mm node allocated, i.e. the object's total global-GTT footprint.
 */
static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
{
	u64 size = 0;
	struct i915_vma *vma;

	for_each_ggtt_vma(vma, obj) {
		if (drm_mm_node_allocated(&vma->node))
			size += vma->node.size;
	}

	return size;
}
117
Matthew Auld7393b7e2017-10-06 23:18:28 +0100118static const char *
119stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
120{
121 size_t x = 0;
122
123 switch (page_sizes) {
124 case 0:
125 return "";
126 case I915_GTT_PAGE_SIZE_4K:
127 return "4K";
128 case I915_GTT_PAGE_SIZE_64K:
129 return "64K";
130 case I915_GTT_PAGE_SIZE_2M:
131 return "2M";
132 default:
133 if (!buf)
134 return "M";
135
136 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
137 x += snprintf(buf + x, len - x, "2M, ");
138 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
139 x += snprintf(buf + x, len - x, "64K, ");
140 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
141 x += snprintf(buf + x, len - x, "4K, ");
142 buf[x-2] = '\0';
143
144 return buf;
145 }
146}
147
/*
 * Print a one-line (plus per-VMA continuation) description of a GEM
 * object into the seq_file: status flags, size, cache domains, every
 * allocated VMA (with GGTT view details and fence), stolen offset,
 * last-write engine and frontbuffer bits.
 *
 * Caller must hold dev->struct_mutex (asserted below) since we walk
 * obj->vma.list without further locking.
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* Flags: active, pinned, tiling, userfault, kernel-mapped. */
	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	/* First pass: count pinned VMAs across all address spaces. */
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_global)
		seq_printf(m, " (global)");
	/* Second pass: describe each VMA that has backing GTT space. */
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			/* GGTT VMAs carry a view (normal/partial/rotated). */
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_active_request_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
235
Chris Wilsone637d2c2017-03-16 13:19:57 +0000236static int obj_rank_by_stolen(const void *A, const void *B)
Chris Wilson6d2b88852013-08-07 18:30:54 +0100237{
Chris Wilsone637d2c2017-03-16 13:19:57 +0000238 const struct drm_i915_gem_object *a =
239 *(const struct drm_i915_gem_object **)A;
240 const struct drm_i915_gem_object *b =
241 *(const struct drm_i915_gem_object **)B;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100242
Rasmus Villemoes2d05fa12015-09-28 23:08:50 +0200243 if (a->stolen->start < b->stolen->start)
244 return -1;
245 if (a->stolen->start > b->stolen->start)
246 return 1;
247 return 0;
Chris Wilson6d2b88852013-08-07 18:30:54 +0100248}
249
/*
 * debugfs: list every bound/unbound GEM object backed by stolen memory,
 * sorted by stolen offset, followed by object/GTT size totals.
 *
 * The candidate array is sized from a racy snapshot of object_count, so
 * the walks below bail out once 'count == total' even if more objects
 * appeared meanwhile.
 */
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;

	/* obj_lock protects the bound/unbound lists against concurrent moves. */
	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);

	}
	/* Unbound objects contribute no GTT size, only object size. */
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, "   ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}
312
/* Per-client (or per-context) GEM memory accounting accumulator. */
struct file_stats {
	struct i915_address_space *vm;	/* filter: only count VMAs in this ppGTT (NULL = all) */
	unsigned long count;		/* number of objects seen */
	u64 total, unbound;		/* total bytes; bytes with no binding */
	u64 global, shared;		/* bytes in the GGTT; bytes exported/named */
	u64 active, inactive;		/* bytes in VMAs active/idle on the GPU */
	u64 closed;			/* bytes in VMAs already closed */
};
321
/*
 * idr_for_each() callback: fold one GEM object into a struct file_stats.
 * When stats->vm is set, per-process VMAs belonging to other address
 * spaces are skipped; GGTT VMAs are always counted (as 'global').
 * Always returns 0 so the idr walk continues.
 */
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			/* ppGTT VMA from a different address space: skip. */
			if (vma->vm != stats->vm)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;

		if (i915_vma_is_closed(vma))
			stats->closed += vma->node.size;
	}

	return 0;
}
359
/*
 * Emit one summary line for a struct file_stats, tagged with @name.
 * Macro (not a function) so it works with any by-value stats struct;
 * prints nothing when no objects were counted.
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound, \
			   stats.closed); \
} while (0)
Brad Volkin493018d2014-12-11 12:13:08 -0800373
/*
 * Accumulate and print memory statistics for every engine's batch-pool
 * caches under a single "[k]batch pool" line.
 */
static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	struct file_stats stats = {};
	enum intel_engine_id id;
	int j;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}
394
/*
 * Walk every GEM context: account kernel-owned context state and rings
 * into a "[k]contexts" line, and for each context with an owning file,
 * print that client's per-file stats under the owning task's comm name.
 */
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *i915)
{
	struct file_stats kstats = {};
	struct i915_gem_context *ctx;

	list_for_each_entry(ctx, &i915->contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		/* Context images and rings are kernel allocations. */
		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			if (ce->state)
				per_file_stats(0, ce->state->obj, &kstats);
			if (ce->ring)
				per_file_stats(0, ce->ring->vma->obj, &kstats);
		}
		i915_gem_context_unlock_engines(ctx);

		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
			/* Filter the client's objects to this ctx's ppGTT. */
			struct file_stats stats = { .vm = &ctx->ppgtt->vm, };
			struct drm_file *file = ctx->file_priv->file;
			struct task_struct *task;
			char name[80];

			spin_lock(&file->table_lock);
			idr_for_each(&file->object_idr, per_file_stats, &stats);
			spin_unlock(&file->table_lock);

			/* pid_task() requires RCU; copy comm before unlock. */
			rcu_read_lock();
			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
			snprintf(name, sizeof(name), "%s",
				 task ? task->comm : "<unknown>");
			rcu_read_unlock();

			print_file_stats(m, name, stats);
		}
	}

	print_file_stats(m, "[k]contexts", kstats);
}
436
/*
 * debugfs: global GEM memory overview — totals for unbound and bound
 * objects, purgeable/mapped/huge-page/display breakdowns, GGTT size,
 * supported page sizes, then per-pool and per-context statistics.
 */
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
	struct drm_i915_gem_object *obj;
	unsigned int page_sizes = 0;
	char buf[80];
	int ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	huge_size = huge_count = 0;

	/* obj_lock stabilises the bound/unbound lists while we tally. */
	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		/* Anything backed by pages larger than 4K counts as huge. */
		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	/* Same tally for bound objects, plus the display-pinned subset. */
	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->pin_global) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
		   huge_count,
		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
		   huge_size);
	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%pa] gtt total\n",
		   ggtt->vm.total, &ggtt->mappable_end);
	seq_printf(m, "Supported page sizes: %s\n",
		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
					buf, sizeof(buf)));

	seq_putc(m, '\n');

	/* Context walks need struct_mutex; interruptible for debugfs. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	print_batch_pool_stats(m, dev_priv);
	print_context_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
540
Damien Lespiauaee56cf2013-06-24 22:59:49 +0100541static int i915_gem_gtt_info(struct seq_file *m, void *data)
Chris Wilson08c18322011-01-10 00:00:24 +0000542{
Damien Lespiau9f25d002014-05-13 15:30:28 +0100543 struct drm_info_node *node = m->private;
David Weinehall36cdd012016-08-22 13:59:31 +0300544 struct drm_i915_private *dev_priv = node_to_i915(node);
545 struct drm_device *dev = &dev_priv->drm;
Chris Wilsonf2123812017-10-16 12:40:37 +0100546 struct drm_i915_gem_object **objects;
Chris Wilson08c18322011-01-10 00:00:24 +0000547 struct drm_i915_gem_object *obj;
Mika Kuoppalac44ef602015-06-25 18:35:05 +0300548 u64 total_obj_size, total_gtt_size;
Chris Wilsonf2123812017-10-16 12:40:37 +0100549 unsigned long nobject, n;
Chris Wilson08c18322011-01-10 00:00:24 +0000550 int count, ret;
551
Chris Wilsonf2123812017-10-16 12:40:37 +0100552 nobject = READ_ONCE(dev_priv->mm.object_count);
553 objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
554 if (!objects)
555 return -ENOMEM;
556
Chris Wilson08c18322011-01-10 00:00:24 +0000557 ret = mutex_lock_interruptible(&dev->struct_mutex);
558 if (ret)
559 return ret;
560
Chris Wilsonf2123812017-10-16 12:40:37 +0100561 count = 0;
562 spin_lock(&dev_priv->mm.obj_lock);
563 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
564 objects[count++] = obj;
565 if (count == nobject)
566 break;
567 }
568 spin_unlock(&dev_priv->mm.obj_lock);
569
570 total_obj_size = total_gtt_size = 0;
571 for (n = 0; n < count; n++) {
572 obj = objects[n];
573
Damien Lespiau267f0c92013-06-24 22:59:48 +0100574 seq_puts(m, " ");
Chris Wilson08c18322011-01-10 00:00:24 +0000575 describe_obj(m, obj);
Damien Lespiau267f0c92013-06-24 22:59:48 +0100576 seq_putc(m, '\n');
Chris Wilson08c18322011-01-10 00:00:24 +0000577 total_obj_size += obj->base.size;
Tvrtko Ursulinca1543b2015-07-01 11:51:10 +0100578 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
Chris Wilson08c18322011-01-10 00:00:24 +0000579 }
580
581 mutex_unlock(&dev->struct_mutex);
582
Mika Kuoppalac44ef602015-06-25 18:35:05 +0300583 seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
Chris Wilson08c18322011-01-10 00:00:24 +0000584 count, total_obj_size, total_gtt_size);
Chris Wilsonf2123812017-10-16 12:40:37 +0100585 kvfree(objects);
Chris Wilson08c18322011-01-10 00:00:24 +0000586
587 return 0;
588}
589
/*
 * debugfs: for each engine and each batch-pool bucket, print the object
 * count, describe every cached object, and finish with a grand total.
 * Holds struct_mutex across the whole walk (describe_obj requires it).
 */
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			/* First pass: just count so the header comes first. */
			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, "   ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
634
/*
 * Dump the gen8+ display interrupt registers: per-pipe IMR/IIR/IER
 * (skipping pipes whose power well is off), then the port, misc and
 * PCU interrupt registers. Takes a display power reference per pipe
 * so the register reads are safe.
 */
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		/* Don't power up a pipe just to read its registers. */
		wakeref = intel_display_power_get_if_enabled(dev_priv,
							     power_domain);
		if (!wakeref) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain, wakeref);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}
686
/*
 * debugfs "i915_interrupt_info": dump the interrupt-related registers
 * for the running platform.
 *
 * The function selects exactly one platform branch — CHV, gen11+,
 * gen8+, VLV, pre-PCH-split, or PCH-split ILK-style — and then appends
 * the per-engine interrupt masks (gen11 fixed-function masks, or the
 * RING_IMR of every engine on gen6+). A runtime-pm wakeref is held
 * across all register reads.
 */
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int i, pipe;

	wakeref = intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t pref;

		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			/* Skip pipes whose power well is down. */
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain, pref);
		}

		/* Hotplug/flip registers need the INIT power domain. */
		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "Master Interrupt Control: %08x\n",
			   I915_READ(GEN11_GFX_MSTR_IRQ));

		seq_printf(m, "Render/Copy Intr Enable: %08x\n",
			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
		seq_printf(m, "VCS/VECS Intr Enable: %08x\n",
			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
		seq_printf(m, "GUC/SG Intr Enable:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
		seq_printf(m, "Crypto Intr Enable:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
		seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));

		seq_printf(m, "Display Interrupt Control:\t%08x\n",
			   I915_READ(GEN11_DISPLAY_INT_CTL));

		/* Display-side registers are shared with gen8. */
		gen8_display_interrupt_info(m);
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		gen8_display_interrupt_info(m);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;
			intel_wakeref_t pref;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			/* Skip pipes whose power well is down. */
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain, pref);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		/* Pre-ILK single IER/IIR/IMR layout. */
		seq_printf(m, "Interrupt enable: %08x\n",
			   I915_READ(GEN2_IER));
		seq_printf(m, "Interrupt identity: %08x\n",
			   I915_READ(GEN2_IIR));
		seq_printf(m, "Interrupt mask: %08x\n",
			   I915_READ(GEN2_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat: %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		/* ILK-style north/south display split. */
		seq_printf(m, "North Display Interrupt enable: %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask: %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable: %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask: %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable: %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity: %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask: %08x\n",
			   I915_READ(GTIMR));
	}

	if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "RCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
		seq_printf(m, "BCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_MASK));
		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));

	} else if (INTEL_GEN(dev_priv) >= 6) {
		for_each_engine(engine, dev_priv, id) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s): %08x\n",
				   engine->name, ENGINE_READ(engine, RING_IMR));
		}
	}

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
905
Chris Wilsona6172a82009-02-11 14:26:38 +0000906static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
907{
David Weinehall36cdd012016-08-22 13:59:31 +0300908 struct drm_i915_private *dev_priv = node_to_i915(m->private);
909 struct drm_device *dev = &dev_priv->drm;
Chris Wilsonde227ef2010-07-03 07:58:38 +0100910 int i, ret;
911
912 ret = mutex_lock_interruptible(&dev->struct_mutex);
913 if (ret)
914 return ret;
Chris Wilsona6172a82009-02-11 14:26:38 +0000915
Chris Wilsona6172a82009-02-11 14:26:38 +0000916 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
917 for (i = 0; i < dev_priv->num_fence_regs; i++) {
Chris Wilson49ef5292016-08-18 17:17:00 +0100918 struct i915_vma *vma = dev_priv->fence_regs[i].vma;
Chris Wilsona6172a82009-02-11 14:26:38 +0000919
Chris Wilson6c085a72012-08-20 11:40:46 +0200920 seq_printf(m, "Fence %d, pin count = %d, object = ",
921 i, dev_priv->fence_regs[i].pin_count);
Chris Wilson49ef5292016-08-18 17:17:00 +0100922 if (!vma)
Damien Lespiau267f0c92013-06-24 22:59:48 +0100923 seq_puts(m, "unused");
Chris Wilsonc2c347a92010-10-27 15:11:53 +0100924 else
Chris Wilson49ef5292016-08-18 17:17:00 +0100925 describe_obj(m, vma->obj);
Damien Lespiau267f0c92013-06-24 22:59:48 +0100926 seq_putc(m, '\n');
Chris Wilsona6172a82009-02-11 14:26:38 +0000927 }
928
Chris Wilson05394f32010-11-08 19:18:58 +0000929 mutex_unlock(&dev->struct_mutex);
Chris Wilsona6172a82009-02-11 14:26:38 +0000930 return 0;
931}
932
Chris Wilson98a2f412016-10-12 10:05:18 +0100933#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
/*
 * debugfs read() for a captured GPU error state: serialise up to
 * @count bytes of the snapshot into a kernel bounce buffer, copy the
 * result to userspace, and advance *pos on success.
 *
 * Returns 0 (EOF) if no state was captured, the number of bytes
 * produced, or a negative errno.
 *
 * NOTE(review): @count is a userspace-supplied size fed straight into
 * kmalloc() — presumably bounded by the VFS/read path; verify.
 */
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_state *error;
	ssize_t ret;
	void *buf;

	error = file->private_data;
	if (!error)
		return 0;

	/* Bounce buffer required because of kernfs __user API convenience. */
	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
	if (ret <= 0)
		goto out;

	/* copy_to_user() returns the number of bytes it could NOT copy. */
	if (!copy_to_user(ubuf, buf, ret))
		*pos += ret;
	else
		ret = -EFAULT;

out:
	kfree(buf);
	return ret;
}
963
964static int gpu_state_release(struct inode *inode, struct file *file)
965{
966 i915_gpu_state_put(file->private_data);
967 return 0;
968}
969
/*
 * debugfs open() for i915_gpu_info: capture a fresh snapshot of the
 * current GPU state — holding a runtime-pm wakeref for the duration of
 * the capture — and stash it as the file's private data so
 * gpu_state_read()/gpu_state_release() can consume it.
 */
static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_state *gpu;
	intel_wakeref_t wakeref;

	gpu = NULL;
	with_intel_runtime_pm(i915, wakeref)
		gpu = i915_capture_gpu_state(i915);
	if (IS_ERR(gpu))
		return PTR_ERR(gpu);

	file->private_data = gpu;
	return 0;
}
985
/*
 * "i915_gpu_info" file: captures a new GPU state snapshot on open,
 * streams it through gpu_state_read(), and releases it on close.
 */
static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
Chris Wilson98a2f412016-10-12 10:05:18 +0100993
Daniel Vetterd5442302012-04-27 15:17:40 +0200994static ssize_t
995i915_error_state_write(struct file *filp,
996 const char __user *ubuf,
997 size_t cnt,
998 loff_t *ppos)
999{
Chris Wilson5a4c6f12017-02-14 16:46:11 +00001000 struct i915_gpu_state *error = filp->private_data;
1001
1002 if (!error)
1003 return 0;
Daniel Vetterd5442302012-04-27 15:17:40 +02001004
1005 DRM_DEBUG_DRIVER("Resetting error state\n");
Chris Wilson5a4c6f12017-02-14 16:46:11 +00001006 i915_reset_error_state(error->i915);
Daniel Vetterd5442302012-04-27 15:17:40 +02001007
1008 return cnt;
1009}
1010
1011static int i915_error_state_open(struct inode *inode, struct file *file)
1012{
Chris Wilsone6154e42018-12-07 11:05:54 +00001013 struct i915_gpu_state *error;
1014
1015 error = i915_first_error_state(inode->i_private);
1016 if (IS_ERR(error))
1017 return PTR_ERR(error);
1018
1019 file->private_data = error;
Mika Kuoppalaedc3d882013-05-23 13:55:35 +03001020 return 0;
Daniel Vetterd5442302012-04-27 15:17:40 +02001021}
1022
/*
 * "i915_error_state" file: reads stream the first recorded error
 * state; any write resets (clears) it.
 */
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
Chris Wilson98a2f412016-10-12 10:05:18 +01001031#endif
1032
Deepak Sadb4bd12014-03-31 11:30:02 +05301033static int i915_frequency_info(struct seq_file *m, void *unused)
Jesse Barnesf97108d2010-01-29 11:27:07 -08001034{
David Weinehall36cdd012016-08-22 13:59:31 +03001035 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001036 struct intel_rps *rps = &dev_priv->gt_pm.rps;
Chris Wilsona0371212019-01-14 14:21:14 +00001037 intel_wakeref_t wakeref;
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02001038 int ret = 0;
1039
Chris Wilsona0371212019-01-14 14:21:14 +00001040 wakeref = intel_runtime_pm_get(dev_priv);
Jesse Barnesf97108d2010-01-29 11:27:07 -08001041
Lucas De Marchicf819ef2018-12-12 10:10:43 -08001042 if (IS_GEN(dev_priv, 5)) {
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001043 u16 rgvswctl = I915_READ16(MEMSWCTL);
1044 u16 rgvstat = I915_READ16(MEMSTAT_ILK);
1045
1046 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
1047 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
1048 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
1049 MEMSTAT_VID_SHIFT);
1050 seq_printf(m, "Current P-state: %d\n",
1051 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
David Weinehall36cdd012016-08-22 13:59:31 +03001052 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
Sagar Arun Kamble0d6fc922017-10-10 22:30:02 +01001053 u32 rpmodectl, freq_sts;
Wayne Boyer666a4532015-12-09 12:29:35 -08001054
Sagar Arun Kamble0d6fc922017-10-10 22:30:02 +01001055 rpmodectl = I915_READ(GEN6_RP_CONTROL);
1056 seq_printf(m, "Video Turbo Mode: %s\n",
1057 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1058 seq_printf(m, "HW control enabled: %s\n",
1059 yesno(rpmodectl & GEN6_RP_ENABLE));
1060 seq_printf(m, "SW control enabled: %s\n",
1061 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1062 GEN6_RP_MEDIA_SW_MODE));
1063
Chris Wilson337fa6e2019-04-26 09:17:20 +01001064 vlv_punit_get(dev_priv);
Wayne Boyer666a4532015-12-09 12:29:35 -08001065 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
Chris Wilson337fa6e2019-04-26 09:17:20 +01001066 vlv_punit_put(dev_priv);
1067
Wayne Boyer666a4532015-12-09 12:29:35 -08001068 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
1069 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
1070
1071 seq_printf(m, "actual GPU freq: %d MHz\n",
1072 intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
1073
1074 seq_printf(m, "current GPU freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001075 intel_gpu_freq(dev_priv, rps->cur_freq));
Wayne Boyer666a4532015-12-09 12:29:35 -08001076
1077 seq_printf(m, "max GPU freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001078 intel_gpu_freq(dev_priv, rps->max_freq));
Wayne Boyer666a4532015-12-09 12:29:35 -08001079
1080 seq_printf(m, "min GPU freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001081 intel_gpu_freq(dev_priv, rps->min_freq));
Wayne Boyer666a4532015-12-09 12:29:35 -08001082
1083 seq_printf(m, "idle GPU freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001084 intel_gpu_freq(dev_priv, rps->idle_freq));
Wayne Boyer666a4532015-12-09 12:29:35 -08001085
1086 seq_printf(m,
1087 "efficient (RPe) frequency: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001088 intel_gpu_freq(dev_priv, rps->efficient_freq));
David Weinehall36cdd012016-08-22 13:59:31 +03001089 } else if (INTEL_GEN(dev_priv) >= 6) {
Bob Paauwe35040562015-06-25 14:54:07 -07001090 u32 rp_state_limits;
1091 u32 gt_perf_status;
1092 u32 rp_state_cap;
Chris Wilson0d8f9492014-03-27 09:06:14 +00001093 u32 rpmodectl, rpinclimit, rpdeclimit;
Chris Wilson8e8c06c2013-08-26 19:51:01 -03001094 u32 rpstat, cagf, reqf;
Jesse Barnesccab5c82011-01-18 15:49:25 -08001095 u32 rpupei, rpcurup, rpprevup;
1096 u32 rpdownei, rpcurdown, rpprevdown;
Paulo Zanoni9dd3c602014-08-01 18:14:48 -03001097 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001098 int max_freq;
1099
Bob Paauwe35040562015-06-25 14:54:07 -07001100 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02001101 if (IS_GEN9_LP(dev_priv)) {
Bob Paauwe35040562015-06-25 14:54:07 -07001102 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
1103 gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
1104 } else {
1105 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
1106 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
1107 }
1108
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001109 /* RPSTAT1 is in the GT power well */
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07001110 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001111
Chris Wilson8e8c06c2013-08-26 19:51:01 -03001112 reqf = I915_READ(GEN6_RPNSWREQ);
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001113 if (INTEL_GEN(dev_priv) >= 9)
Akash Goel60260a52015-03-06 11:07:21 +05301114 reqf >>= 23;
1115 else {
1116 reqf &= ~GEN6_TURBO_DISABLE;
David Weinehall36cdd012016-08-22 13:59:31 +03001117 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
Akash Goel60260a52015-03-06 11:07:21 +05301118 reqf >>= 24;
1119 else
1120 reqf >>= 25;
1121 }
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001122 reqf = intel_gpu_freq(dev_priv, reqf);
Chris Wilson8e8c06c2013-08-26 19:51:01 -03001123
Chris Wilson0d8f9492014-03-27 09:06:14 +00001124 rpmodectl = I915_READ(GEN6_RP_CONTROL);
1125 rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
1126 rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
1127
Jesse Barnesccab5c82011-01-18 15:49:25 -08001128 rpstat = I915_READ(GEN6_RPSTAT1);
Akash Goeld6cda9c2016-04-23 00:05:46 +05301129 rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
1130 rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
1131 rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
1132 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
1133 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
1134 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
Tvrtko Ursulinc84b2702017-11-21 18:18:44 +00001135 cagf = intel_gpu_freq(dev_priv,
1136 intel_get_cagf(dev_priv, rpstat));
Jesse Barnesccab5c82011-01-18 15:49:25 -08001137
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07001138 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
Ben Widawskyd1ebd8162011-04-25 20:11:50 +01001139
Oscar Mateo6b7a6a72018-05-10 14:59:55 -07001140 if (INTEL_GEN(dev_priv) >= 11) {
1141 pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
1142 pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
1143 /*
1144 * The equivalent to the PM ISR & IIR cannot be read
1145 * without affecting the current state of the system
1146 */
1147 pm_isr = 0;
1148 pm_iir = 0;
1149 } else if (INTEL_GEN(dev_priv) >= 8) {
Paulo Zanoni9dd3c602014-08-01 18:14:48 -03001150 pm_ier = I915_READ(GEN8_GT_IER(2));
1151 pm_imr = I915_READ(GEN8_GT_IMR(2));
1152 pm_isr = I915_READ(GEN8_GT_ISR(2));
1153 pm_iir = I915_READ(GEN8_GT_IIR(2));
Oscar Mateo6b7a6a72018-05-10 14:59:55 -07001154 } else {
1155 pm_ier = I915_READ(GEN6_PMIER);
1156 pm_imr = I915_READ(GEN6_PMIMR);
1157 pm_isr = I915_READ(GEN6_PMISR);
1158 pm_iir = I915_READ(GEN6_PMIIR);
Paulo Zanoni9dd3c602014-08-01 18:14:48 -03001159 }
Oscar Mateo6b7a6a72018-05-10 14:59:55 -07001160 pm_mask = I915_READ(GEN6_PMINTRMSK);
1161
Sagar Arun Kamble960e5462017-10-10 22:29:59 +01001162 seq_printf(m, "Video Turbo Mode: %s\n",
1163 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1164 seq_printf(m, "HW control enabled: %s\n",
1165 yesno(rpmodectl & GEN6_RP_ENABLE));
1166 seq_printf(m, "SW control enabled: %s\n",
1167 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1168 GEN6_RP_MEDIA_SW_MODE));
Oscar Mateo6b7a6a72018-05-10 14:59:55 -07001169
1170 seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
1171 pm_ier, pm_imr, pm_mask);
1172 if (INTEL_GEN(dev_priv) <= 10)
1173 seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
1174 pm_isr, pm_iir);
Sagar Arun Kamble5dd04552017-03-11 08:07:00 +05301175 seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001176 rps->pm_intrmsk_mbz);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001177 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001178 seq_printf(m, "Render p-state ratio: %d\n",
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001179 (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001180 seq_printf(m, "Render p-state VID: %d\n",
1181 gt_perf_status & 0xff);
1182 seq_printf(m, "Render p-state limit: %d\n",
1183 rp_state_limits & 0xff);
Chris Wilson0d8f9492014-03-27 09:06:14 +00001184 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
1185 seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
1186 seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
1187 seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
Chris Wilson8e8c06c2013-08-26 19:51:01 -03001188 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
Ben Widawskyf82855d2013-01-29 12:00:15 -08001189 seq_printf(m, "CAGF: %dMHz\n", cagf);
Akash Goeld6cda9c2016-04-23 00:05:46 +05301190 seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
1191 rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
1192 seq_printf(m, "RP CUR UP: %d (%dus)\n",
1193 rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
1194 seq_printf(m, "RP PREV UP: %d (%dus)\n",
1195 rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
Chris Wilson60548c52018-07-31 14:26:29 +01001196 seq_printf(m, "Up threshold: %d%%\n",
1197 rps->power.up_threshold);
Chris Wilsond86ed342015-04-27 13:41:19 +01001198
Akash Goeld6cda9c2016-04-23 00:05:46 +05301199 seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
1200 rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
1201 seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
1202 rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
1203 seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
1204 rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
Chris Wilson60548c52018-07-31 14:26:29 +01001205 seq_printf(m, "Down threshold: %d%%\n",
1206 rps->power.down_threshold);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001207
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02001208 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
Bob Paauwe35040562015-06-25 14:54:07 -07001209 rp_state_cap >> 16) & 0xff;
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001210 max_freq *= (IS_GEN9_BC(dev_priv) ||
Oscar Mateo2b2874e2018-04-05 17:00:52 +03001211 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001212 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001213 intel_gpu_freq(dev_priv, max_freq));
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001214
1215 max_freq = (rp_state_cap & 0xff00) >> 8;
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001216 max_freq *= (IS_GEN9_BC(dev_priv) ||
Oscar Mateo2b2874e2018-04-05 17:00:52 +03001217 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001218 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001219 intel_gpu_freq(dev_priv, max_freq));
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001220
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02001221 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
Bob Paauwe35040562015-06-25 14:54:07 -07001222 rp_state_cap >> 0) & 0xff;
Rodrigo Vivi35ceabf2017-07-06 13:41:13 -07001223 max_freq *= (IS_GEN9_BC(dev_priv) ||
Oscar Mateo2b2874e2018-04-05 17:00:52 +03001224 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001225 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
Ville Syrjälä7c59a9c12015-01-23 21:04:26 +02001226 intel_gpu_freq(dev_priv, max_freq));
Ben Widawsky31c77382013-04-05 14:29:22 -07001227 seq_printf(m, "Max overclocked frequency: %dMHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001228 intel_gpu_freq(dev_priv, rps->max_freq));
Chris Wilsonaed242f2015-03-18 09:48:21 +00001229
Chris Wilsond86ed342015-04-27 13:41:19 +01001230 seq_printf(m, "Current freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001231 intel_gpu_freq(dev_priv, rps->cur_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001232 seq_printf(m, "Actual freq: %d MHz\n", cagf);
Chris Wilsonaed242f2015-03-18 09:48:21 +00001233 seq_printf(m, "Idle freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001234 intel_gpu_freq(dev_priv, rps->idle_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001235 seq_printf(m, "Min freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001236 intel_gpu_freq(dev_priv, rps->min_freq));
Chris Wilson29ecd78d2016-07-13 09:10:35 +01001237 seq_printf(m, "Boost freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001238 intel_gpu_freq(dev_priv, rps->boost_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001239 seq_printf(m, "Max freq: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001240 intel_gpu_freq(dev_priv, rps->max_freq));
Chris Wilsond86ed342015-04-27 13:41:19 +01001241 seq_printf(m,
1242 "efficient (RPe) frequency: %d MHz\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001243 intel_gpu_freq(dev_priv, rps->efficient_freq));
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001244 } else {
Damien Lespiau267f0c92013-06-24 22:59:48 +01001245 seq_puts(m, "no P-state info available\n");
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001246 }
Jesse Barnesf97108d2010-01-29 11:27:07 -08001247
Ville Syrjälä49cd97a2017-02-07 20:33:45 +02001248 seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
Mika Kahola1170f282015-09-25 14:00:32 +03001249 seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
1250 seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
1251
Chris Wilsona0371212019-01-14 14:21:14 +00001252 intel_runtime_pm_put(dev_priv, wakeref);
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02001253 return ret;
Jesse Barnesf97108d2010-01-29 11:27:07 -08001254}
1255
/*
 * Print the decoded INSTDONE state to the seq_file: the base INSTDONE
 * register always, the common-slice register when gen > 3, and the
 * per-slice/per-subslice sampler and row registers when gen > 6.
 */
static void i915_instdone_info(struct drm_i915_private *dev_priv,
			       struct seq_file *m,
			       struct intel_instdone *instdone)
{
	int slice;
	int subslice;

	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
		   instdone->instdone);

	/* gen2/3 have only the single INSTDONE register. */
	if (INTEL_GEN(dev_priv) <= 3)
		return;

	seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
		   instdone->slice_common);

	/* Per-slice/subslice breakdown only exists past gen6. */
	if (INTEL_GEN(dev_priv) <= 6)
		return;

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->sampler[slice][subslice]);

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->row[slice][subslice]);
}
1283
/*
 * debugfs: report the state of the GPU hangcheck machinery — global
 * reset flags, timer/work status, and per-engine seqno/ACTHD progress,
 * plus the render engine's INSTDONE snapshot (live and accumulated).
 */
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	intel_wakeref_t wakeref;
	enum intel_engine_id id;

	seq_printf(m, "Reset flags: %lx\n", dev_priv->gpu_error.flags);
	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "\tWedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "\tDevice (global) reset in progress\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	/* Sample the live hardware state under a runtime-PM wakeref. */
	with_intel_runtime_pm(dev_priv, wakeref) {
		for_each_engine(engine, dev_priv, id) {
			acthd[id] = intel_engine_get_active_head(engine);
			seqno[id] = intel_engine_get_hangcheck_seqno(engine);
		}

		/* INSTDONE is only sampled for the render engine. */
		intel_engine_get_instdone(dev_priv->engine[RCS0], &instdone);
	}

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s:\n", engine->name);
		/*
		 * NOTE(review): the "last" column prints hangcheck.next_seqno
		 * while "seqno =" prints hangcheck.last_seqno — the labels
		 * look swapped relative to the field names; confirm intent
		 * before relying on them.
		 */
		seq_printf(m, "\tseqno = %x [current %x, last %x], %dms ago\n",
			   engine->hangcheck.last_seqno,
			   seqno[id],
			   engine->hangcheck.next_seqno,
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);

		if (engine->id == RCS0) {
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}
1352
Michel Thierry061d06a2017-06-20 10:57:49 +01001353static int i915_reset_info(struct seq_file *m, void *unused)
1354{
1355 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1356 struct i915_gpu_error *error = &dev_priv->gpu_error;
1357 struct intel_engine_cs *engine;
1358 enum intel_engine_id id;
1359
1360 seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1361
1362 for_each_engine(engine, dev_priv, id) {
1363 seq_printf(m, "%s = %u\n", engine->name,
1364 i915_reset_engine_count(error, engine));
1365 }
1366
1367 return 0;
1368}
1369
/*
 * Dump Ironlake (gen5) dynamic render power state: the MEMMODECTL
 * frequency-control fields, the CRSTANDVID voltage IDs and the current
 * render-standby (RSx) state decoded from RSTDBYCTL.
 *
 * Caller (i915_drpc_info) holds the runtime-PM wakeref for the reads.
 */
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	/* CRSTANDVID packs two 6-bit voltage IDs: RS1 in 5:0, RS2 in 13:8. */
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}
1426
Mika Kuoppalaf65367b2015-01-16 11:34:42 +02001427static int i915_forcewake_domains(struct seq_file *m, void *data)
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001428{
Chris Wilson233ebf52017-03-23 10:19:44 +00001429 struct drm_i915_private *i915 = node_to_i915(m->private);
Daniele Ceraolo Spuriof568eee2019-03-19 11:35:35 -07001430 struct intel_uncore *uncore = &i915->uncore;
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001431 struct intel_uncore_forcewake_domain *fw_domain;
Chris Wilsond2dc94b2017-03-23 10:19:41 +00001432 unsigned int tmp;
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001433
Chris Wilsond7a133d2017-09-07 14:44:41 +01001434 seq_printf(m, "user.bypass_count = %u\n",
Daniele Ceraolo Spuriof568eee2019-03-19 11:35:35 -07001435 uncore->user_forcewake.count);
Chris Wilsond7a133d2017-09-07 14:44:41 +01001436
Daniele Ceraolo Spuriof568eee2019-03-19 11:35:35 -07001437 for_each_fw_domain(fw_domain, uncore, tmp)
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001438 seq_printf(m, "%s.wake_count = %u\n",
Tvrtko Ursulin33c582c2016-04-07 17:04:33 +01001439 intel_uncore_forcewake_domain_to_str(fw_domain->id),
Chris Wilson233ebf52017-03-23 10:19:44 +00001440 READ_ONCE(fw_domain->wake_count));
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001441
1442 return 0;
1443}
1444
Mika Kuoppala13628772017-03-15 17:43:02 +02001445static void print_rc6_res(struct seq_file *m,
1446 const char *title,
1447 const i915_reg_t reg)
1448{
1449 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1450
1451 seq_printf(m, "%s %u (%llu us)\n",
1452 title, I915_READ(reg),
1453 intel_rc6_residency_us(dev_priv, reg));
1454}
1455
Deepak S669ab5a2014-01-10 15:18:26 +05301456static int vlv_drpc_info(struct seq_file *m)
1457{
David Weinehall36cdd012016-08-22 13:59:31 +03001458 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Sagar Arun Kamble0d6fc922017-10-10 22:30:02 +01001459 u32 rcctl1, pw_status;
Deepak S669ab5a2014-01-10 15:18:26 +05301460
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001461 pw_status = I915_READ(VLV_GTLC_PW_STATUS);
Deepak S669ab5a2014-01-10 15:18:26 +05301462 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1463
Deepak S669ab5a2014-01-10 15:18:26 +05301464 seq_printf(m, "RC6 Enabled: %s\n",
1465 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1466 GEN6_RC_CTL_EI_MODE(1))));
1467 seq_printf(m, "Render Power Well: %s\n",
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001468 (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
Deepak S669ab5a2014-01-10 15:18:26 +05301469 seq_printf(m, "Media Power Well: %s\n",
Ville Syrjälä6b312cd2014-11-19 20:07:42 +02001470 (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
Deepak S669ab5a2014-01-10 15:18:26 +05301471
Mika Kuoppala13628772017-03-15 17:43:02 +02001472 print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1473 print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
Imre Deak9cc19be2014-04-14 20:24:24 +03001474
Mika Kuoppalaf65367b2015-01-16 11:34:42 +02001475 return i915_forcewake_domains(m, NULL);
Deepak S669ab5a2014-01-10 15:18:26 +05301476}
1477
/*
 * Dump gen6+ RC-state information: RC-control enable bits, the current
 * RC state from GEN6_GT_CORE_STATUS, gen9+ power-well gating, RC6
 * residency counters and (gen6/7) RC6 voltage IDs from the PCU.
 *
 * Caller (i915_drpc_info) holds the runtime-PM wakeref for the reads.
 */
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	/* Raw (no-forcewake) read; mirrored into the register trace. */
	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	/* RC6 voltage IDs only exist up to gen7; rc6vids stays 0 otherwise. */
	if (INTEL_GEN(dev_priv) <= 7)
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids);

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		/* RC0 may still mean the core itself is power-gated. */
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	/* Decode the three packed 8-bit voltage IDs read from the PCU. */
	if (INTEL_GEN(dev_priv) <= 7) {
		seq_printf(m, "RC6 voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	return i915_forcewake_domains(m, NULL);
}
1562
1563static int i915_drpc_info(struct seq_file *m, void *unused)
1564{
David Weinehall36cdd012016-08-22 13:59:31 +03001565 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00001566 intel_wakeref_t wakeref;
Chris Wilsond4225a52019-01-14 14:21:23 +00001567 int err = -ENODEV;
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001568
Chris Wilsond4225a52019-01-14 14:21:23 +00001569 with_intel_runtime_pm(dev_priv, wakeref) {
1570 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1571 err = vlv_drpc_info(m);
1572 else if (INTEL_GEN(dev_priv) >= 6)
1573 err = gen6_drpc_info(m);
1574 else
1575 err = ironlake_drpc_info(m);
1576 }
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001577
1578 return err;
Ben Widawsky4d855292011-12-12 19:34:16 -08001579}
1580
Daniel Vetter9a851782015-06-18 10:30:22 +02001581static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1582{
David Weinehall36cdd012016-08-22 13:59:31 +03001583 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Daniel Vetter9a851782015-06-18 10:30:22 +02001584
1585 seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1586 dev_priv->fb_tracking.busy_bits);
1587
1588 seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1589 dev_priv->fb_tracking.flip_bits);
1590
1591 return 0;
1592}
1593
/*
 * debugfs: report whether framebuffer compression is active and, if so,
 * whether the hardware currently reports compressed segments.  The
 * compression-status register and mask differ per platform generation.
 */
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_fbc *fbc = &dev_priv->fbc;
	intel_wakeref_t wakeref;

	if (!HAS_FBC(dev_priv))
		return -ENODEV;

	/* Wakeref for the MMIO reads; fbc->lock guards the FBC state. */
	wakeref = intel_runtime_pm_get(dev_priv);
	mutex_lock(&fbc->lock);

	if (intel_fbc_is_active(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);

	/* NOTE(review): intel_fbc_is_active() is queried twice; the two
	 * branches could share one call — harmless but redundant. */
	if (intel_fbc_is_active(dev_priv)) {
		u32 mask;

		/* Pick the compression-status register/mask for this gen. */
		if (INTEL_GEN(dev_priv) >= 8)
			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 7)
			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 5)
			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
		else if (IS_G4X(dev_priv))
			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
		else
			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
							FBC_STAT_COMPRESSED);

		seq_printf(m, "Compressing: %s\n", yesno(mask));
	}

	mutex_unlock(&fbc->lock);
	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
1634
Ville Syrjälä4127dc42017-06-06 15:44:12 +03001635static int i915_fbc_false_color_get(void *data, u64 *val)
Rodrigo Vivida46f932014-08-01 02:04:45 -07001636{
David Weinehall36cdd012016-08-22 13:59:31 +03001637 struct drm_i915_private *dev_priv = data;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001638
David Weinehall36cdd012016-08-22 13:59:31 +03001639 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
Rodrigo Vivida46f932014-08-01 02:04:45 -07001640 return -ENODEV;
1641
Rodrigo Vivida46f932014-08-01 02:04:45 -07001642 *val = dev_priv->fbc.false_color;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001643
1644 return 0;
1645}
1646
Ville Syrjälä4127dc42017-06-06 15:44:12 +03001647static int i915_fbc_false_color_set(void *data, u64 val)
Rodrigo Vivida46f932014-08-01 02:04:45 -07001648{
David Weinehall36cdd012016-08-22 13:59:31 +03001649 struct drm_i915_private *dev_priv = data;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001650 u32 reg;
1651
David Weinehall36cdd012016-08-22 13:59:31 +03001652 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
Rodrigo Vivida46f932014-08-01 02:04:45 -07001653 return -ENODEV;
1654
Paulo Zanoni25ad93f2015-07-02 19:25:10 -03001655 mutex_lock(&dev_priv->fbc.lock);
Rodrigo Vivida46f932014-08-01 02:04:45 -07001656
1657 reg = I915_READ(ILK_DPFC_CONTROL);
1658 dev_priv->fbc.false_color = val;
1659
1660 I915_WRITE(ILK_DPFC_CONTROL, val ?
1661 (reg | FBC_CTL_FALSE_COLOR) :
1662 (reg & ~FBC_CTL_FALSE_COLOR));
1663
Paulo Zanoni25ad93f2015-07-02 19:25:10 -03001664 mutex_unlock(&dev_priv->fbc.lock);
Rodrigo Vivida46f932014-08-01 02:04:45 -07001665 return 0;
1666}
1667
/*
 * debugfs glue: expose the false-colour get/set pair above as a simple
 * u64 attribute file, printed with "%llu\n".
 */
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");
1671
Paulo Zanoni92d44622013-05-31 16:33:24 -03001672static int i915_ips_status(struct seq_file *m, void *unused)
1673{
David Weinehall36cdd012016-08-22 13:59:31 +03001674 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00001675 intel_wakeref_t wakeref;
Paulo Zanoni92d44622013-05-31 16:33:24 -03001676
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00001677 if (!HAS_IPS(dev_priv))
1678 return -ENODEV;
Paulo Zanoni92d44622013-05-31 16:33:24 -03001679
Chris Wilsona0371212019-01-14 14:21:14 +00001680 wakeref = intel_runtime_pm_get(dev_priv);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001681
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001682 seq_printf(m, "Enabled by kernel parameter: %s\n",
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00001683 yesno(i915_modparams.enable_ips));
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001684
David Weinehall36cdd012016-08-22 13:59:31 +03001685 if (INTEL_GEN(dev_priv) >= 8) {
Rodrigo Vivi0eaa53f2014-06-30 04:45:01 -07001686 seq_puts(m, "Currently: unknown\n");
1687 } else {
1688 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1689 seq_puts(m, "Currently: enabled\n");
1690 else
1691 seq_puts(m, "Currently: disabled\n");
1692 }
Paulo Zanoni92d44622013-05-31 16:33:24 -03001693
Chris Wilsona0371212019-01-14 14:21:14 +00001694 intel_runtime_pm_put(dev_priv, wakeref);
Paulo Zanoni36623ef2014-02-21 13:52:23 -03001695
Paulo Zanoni92d44622013-05-31 16:33:24 -03001696 return 0;
1697}
1698
/*
 * debugfs: report whether legacy display self-refresh (SR) is enabled.
 * The enable bit lives in a different register on each platform family;
 * gen9+ has no global SR bit (self-refresh is per-plane watermarks).
 */
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	bool sr_enabled = false;

	/* The display power well must be up to read these registers. */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
1727
Jesse Barnes7648fa92010-05-20 14:28:11 -07001728static int i915_emon_status(struct seq_file *m, void *unused)
1729{
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001730 struct drm_i915_private *i915 = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00001731 intel_wakeref_t wakeref;
Chris Wilsonde227ef2010-07-03 07:58:38 +01001732
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001733 if (!IS_GEN(i915, 5))
Chris Wilson582be6b2012-04-30 19:35:02 +01001734 return -ENODEV;
1735
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001736 with_intel_runtime_pm(i915, wakeref) {
1737 unsigned long temp, chipset, gfx;
Jesse Barnes7648fa92010-05-20 14:28:11 -07001738
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001739 temp = i915_mch_val(i915);
1740 chipset = i915_chipset_val(i915);
1741 gfx = i915_gfx_val(i915);
Chris Wilsona0371212019-01-14 14:21:14 +00001742
Chris Wilson4a8ab5e2019-01-14 14:21:29 +00001743 seq_printf(m, "GMCH temp: %ld\n", temp);
1744 seq_printf(m, "Chipset power: %ld\n", chipset);
1745 seq_printf(m, "GFX power: %ld\n", gfx);
1746 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1747 }
Jesse Barnes7648fa92010-05-20 14:28:11 -07001748
1749 return 0;
1750}
1751
/*
 * debugfs: print the GPU-frequency to effective CPU/ring frequency
 * mapping table maintained by the PCU.  Only meaningful on platforms
 * with an LLC, where the ring clock scales with the GPU frequency.
 */
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	intel_wakeref_t wakeref;
	int gpu_freq, ia_freq;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 MHz units for the pcode query. */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	/* The per-frequency pcode reads below need the device awake. */
	wakeref = intel_runtime_pm_get(dev_priv);
	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		ia_freq = gpu_freq;
		/* Pcode returns the table entry in-place in ia_freq. */
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		/* Bits 7:0 = effective CPU, 15:8 = effective ring,
		 * both in 100 MHz units (see the column header above). */
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ||
						      INTEL_GEN(dev_priv) >= 10 ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}
	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
1791
Chris Wilson44834a62010-08-19 16:09:23 +01001792static int i915_opregion(struct seq_file *m, void *unused)
1793{
David Weinehall36cdd012016-08-22 13:59:31 +03001794 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1795 struct drm_device *dev = &dev_priv->drm;
Chris Wilson44834a62010-08-19 16:09:23 +01001796 struct intel_opregion *opregion = &dev_priv->opregion;
1797 int ret;
1798
1799 ret = mutex_lock_interruptible(&dev->struct_mutex);
1800 if (ret)
Daniel Vetter0d38f002012-04-21 22:49:10 +02001801 goto out;
Chris Wilson44834a62010-08-19 16:09:23 +01001802
Jani Nikula2455a8e2015-12-14 12:50:53 +02001803 if (opregion->header)
1804 seq_write(m, opregion->header, OPREGION_SIZE);
Chris Wilson44834a62010-08-19 16:09:23 +01001805
1806 mutex_unlock(&dev->struct_mutex);
1807
Daniel Vetter0d38f002012-04-21 22:49:10 +02001808out:
Chris Wilson44834a62010-08-19 16:09:23 +01001809 return 0;
1810}
1811
Jani Nikulaada8f952015-12-15 13:17:12 +02001812static int i915_vbt(struct seq_file *m, void *unused)
1813{
David Weinehall36cdd012016-08-22 13:59:31 +03001814 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
Jani Nikulaada8f952015-12-15 13:17:12 +02001815
1816 if (opregion->vbt)
1817 seq_write(m, opregion->vbt, opregion->vbt_size);
1818
1819 return 0;
1820}
1821
/*
 * debugfs: describe every framebuffer known to the device — the fbdev
 * console framebuffer first (when fbdev emulation is built in), then
 * all user-created framebuffers, skipping the fbdev one to avoid a
 * duplicate line.
 *
 * Locking: struct_mutex for describe_obj()'s object state, and
 * mode_config.fb_lock around the framebuffer-list walk.
 */
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
		seq_putc(m, '\n');
	}
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
		/* Already printed above as the fbcon framebuffer. */
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, intel_fb_obj(&fb->base));
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1871
Chris Wilson7e37f882016-08-02 22:50:21 +01001872static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001873{
Chris Wilsonef5032a2018-03-07 13:42:24 +00001874 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1875 ring->space, ring->head, ring->tail, ring->emit);
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001876}
1877
Ben Widawskye76d3632011-03-19 18:14:29 -07001878static int i915_context_status(struct seq_file *m, void *unused)
1879{
David Weinehall36cdd012016-08-22 13:59:31 +03001880 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1881 struct drm_device *dev = &dev_priv->drm;
Chris Wilsone2efd132016-05-24 14:53:34 +01001882 struct i915_gem_context *ctx;
Dave Gordonc3232b12016-03-23 18:19:53 +00001883 int ret;
Ben Widawskye76d3632011-03-19 18:14:29 -07001884
Daniel Vetterf3d28872014-05-29 23:23:08 +02001885 ret = mutex_lock_interruptible(&dev->struct_mutex);
Ben Widawskye76d3632011-03-19 18:14:29 -07001886 if (ret)
1887 return ret;
1888
Chris Wilson829a0af2017-06-20 12:05:45 +01001889 list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
Chris Wilson02684442019-04-26 17:33:35 +01001890 struct i915_gem_engines_iter it;
Chris Wilson7e3d9a52019-03-08 13:25:16 +00001891 struct intel_context *ce;
1892
Chris Wilson288f1ce2018-09-04 16:31:17 +01001893 seq_puts(m, "HW context ");
1894 if (!list_empty(&ctx->hw_id_link))
1895 seq_printf(m, "%x [pin %u]", ctx->hw_id,
1896 atomic_read(&ctx->hw_id_pin_count));
Chris Wilsonc84455b2016-08-15 10:49:08 +01001897 if (ctx->pid) {
Chris Wilsond28b99a2016-05-24 14:53:39 +01001898 struct task_struct *task;
1899
Chris Wilsonc84455b2016-08-15 10:49:08 +01001900 task = get_pid_task(ctx->pid, PIDTYPE_PID);
Chris Wilsond28b99a2016-05-24 14:53:39 +01001901 if (task) {
1902 seq_printf(m, "(%s [%d]) ",
1903 task->comm, task->pid);
1904 put_task_struct(task);
1905 }
Chris Wilsonc84455b2016-08-15 10:49:08 +01001906 } else if (IS_ERR(ctx->file_priv)) {
1907 seq_puts(m, "(deleted) ");
Chris Wilsond28b99a2016-05-24 14:53:39 +01001908 } else {
1909 seq_puts(m, "(kernel) ");
1910 }
1911
Chris Wilsonbca44d82016-05-24 14:53:41 +01001912 seq_putc(m, ctx->remap_slice ? 'R' : 'r');
1913 seq_putc(m, '\n');
Ben Widawskya33afea2013-09-17 21:12:45 -07001914
Chris Wilson02684442019-04-26 17:33:35 +01001915 for_each_gem_engine(ce,
1916 i915_gem_context_lock_engines(ctx), it) {
Chris Wilson7e3d9a52019-03-08 13:25:16 +00001917 seq_printf(m, "%s: ", ce->engine->name);
Chris Wilsonbca44d82016-05-24 14:53:41 +01001918 if (ce->state)
Chris Wilsonbf3783e2016-08-15 10:48:54 +01001919 describe_obj(m, ce->state->obj);
Chris Wilsondca33ec2016-08-02 22:50:20 +01001920 if (ce->ring)
Chris Wilson7e37f882016-08-02 22:50:21 +01001921 describe_ctx_ring(m, ce->ring);
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001922 seq_putc(m, '\n');
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001923 }
Chris Wilson02684442019-04-26 17:33:35 +01001924 i915_gem_context_unlock_engines(ctx);
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001925
Ben Widawskya33afea2013-09-17 21:12:45 -07001926 seq_putc(m, '\n');
Ben Widawskya168c292013-02-14 15:05:12 -08001927 }
1928
Daniel Vetterf3d28872014-05-29 23:23:08 +02001929 mutex_unlock(&dev->struct_mutex);
Ben Widawskye76d3632011-03-19 18:14:29 -07001930
1931 return 0;
1932}
1933
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001934static const char *swizzle_string(unsigned swizzle)
1935{
Damien Lespiauaee56cf2013-06-24 22:59:49 +01001936 switch (swizzle) {
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001937 case I915_BIT_6_SWIZZLE_NONE:
1938 return "none";
1939 case I915_BIT_6_SWIZZLE_9:
1940 return "bit9";
1941 case I915_BIT_6_SWIZZLE_9_10:
1942 return "bit9/bit10";
1943 case I915_BIT_6_SWIZZLE_9_11:
1944 return "bit9/bit11";
1945 case I915_BIT_6_SWIZZLE_9_10_11:
1946 return "bit9/bit10/bit11";
1947 case I915_BIT_6_SWIZZLE_9_17:
1948 return "bit9/bit17";
1949 case I915_BIT_6_SWIZZLE_9_10_17:
1950 return "bit9/bit10/bit17";
1951 case I915_BIT_6_SWIZZLE_UNKNOWN:
Masanari Iida8a168ca2012-12-29 02:00:09 +09001952 return "unknown";
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001953 }
1954
1955 return "bug";
1956}
1957
/*
 * i915_swizzle_info - debugfs dump of bit-6 swizzle configuration
 *
 * Reports the detected X/Y tiling swizzle modes, plus the raw hardware
 * registers they were derived from: DCC/DRB registers on gen3-4, the
 * MAD_DIMM/TILECTL/ARB family on gen6+. Also flags L-shaped memory if
 * the QUIRK_PIN_SWIZZLED_PAGES quirk is set.
 *
 * Always returns 0.
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	/* hold a runtime-PM wakeref across the raw register reads below */
	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		/* gen8+ moved the arbiter mode bits to GAMTARBMODE */
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
2005
Chris Wilson7466c292016-08-15 09:49:33 +01002006static const char *rps_power_to_str(unsigned int power)
2007{
2008 static const char * const strings[] = {
2009 [LOW_POWER] = "low power",
2010 [BETWEEN] = "mixed",
2011 [HIGH_POWER] = "high power",
2012 };
2013
2014 if (power >= ARRAY_SIZE(strings) || !strings[power])
2015 return "unknown";
2016
2017 return strings[power];
2018}
2019
Chris Wilson1854d5c2015-04-07 16:20:32 +01002020static int i915_rps_boost_info(struct seq_file *m, void *data)
2021{
David Weinehall36cdd012016-08-22 13:59:31 +03002022 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002023 struct intel_rps *rps = &dev_priv->gt_pm.rps;
Chris Wilsonc0a6aa72018-10-02 12:32:21 +01002024 u32 act_freq = rps->cur_freq;
Chris Wilsona0371212019-01-14 14:21:14 +00002025 intel_wakeref_t wakeref;
Chris Wilson1854d5c2015-04-07 16:20:32 +01002026
Chris Wilsond4225a52019-01-14 14:21:23 +00002027 with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
Chris Wilsonc0a6aa72018-10-02 12:32:21 +01002028 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
Chris Wilson337fa6e2019-04-26 09:17:20 +01002029 vlv_punit_get(dev_priv);
Chris Wilsonc0a6aa72018-10-02 12:32:21 +01002030 act_freq = vlv_punit_read(dev_priv,
2031 PUNIT_REG_GPU_FREQ_STS);
Chris Wilson337fa6e2019-04-26 09:17:20 +01002032 vlv_punit_put(dev_priv);
Chris Wilsonc0a6aa72018-10-02 12:32:21 +01002033 act_freq = (act_freq >> 8) & 0xff;
Chris Wilsonc0a6aa72018-10-02 12:32:21 +01002034 } else {
2035 act_freq = intel_get_cagf(dev_priv,
2036 I915_READ(GEN6_RPSTAT1));
2037 }
Chris Wilsonc0a6aa72018-10-02 12:32:21 +01002038 }
2039
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002040 seq_printf(m, "RPS enabled? %d\n", rps->enabled);
Chris Wilson79ffac852019-04-24 21:07:17 +01002041 seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
Chris Wilson7b92c1b2017-06-28 13:35:48 +01002042 seq_printf(m, "Boosts outstanding? %d\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002043 atomic_read(&rps->num_waiters));
Chris Wilson60548c52018-07-31 14:26:29 +01002044 seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
Chris Wilsonc0a6aa72018-10-02 12:32:21 +01002045 seq_printf(m, "Frequency requested %d, actual %d\n",
2046 intel_gpu_freq(dev_priv, rps->cur_freq),
2047 intel_gpu_freq(dev_priv, act_freq));
Chris Wilson7466c292016-08-15 09:49:33 +01002048 seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002049 intel_gpu_freq(dev_priv, rps->min_freq),
2050 intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
2051 intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
2052 intel_gpu_freq(dev_priv, rps->max_freq));
Chris Wilson7466c292016-08-15 09:49:33 +01002053 seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01002054 intel_gpu_freq(dev_priv, rps->idle_freq),
2055 intel_gpu_freq(dev_priv, rps->efficient_freq),
2056 intel_gpu_freq(dev_priv, rps->boost_freq));
Daniel Vetter1d2ac402016-04-26 19:29:41 +02002057
Chris Wilson62eb3c22019-02-13 09:25:04 +00002058 seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
Chris Wilson1854d5c2015-04-07 16:20:32 +01002059
Chris Wilson79ffac852019-04-24 21:07:17 +01002060 if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
Chris Wilson7466c292016-08-15 09:49:33 +01002061 u32 rpup, rpupei;
2062 u32 rpdown, rpdownei;
2063
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07002064 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
Chris Wilson7466c292016-08-15 09:49:33 +01002065 rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
2066 rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
2067 rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
2068 rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07002069 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
Chris Wilson7466c292016-08-15 09:49:33 +01002070
2071 seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
Chris Wilson60548c52018-07-31 14:26:29 +01002072 rps_power_to_str(rps->power.mode));
Chris Wilson7466c292016-08-15 09:49:33 +01002073 seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
Chris Wilson23f4a282017-02-18 11:27:08 +00002074 rpup && rpupei ? 100 * rpup / rpupei : 0,
Chris Wilson60548c52018-07-31 14:26:29 +01002075 rps->power.up_threshold);
Chris Wilson7466c292016-08-15 09:49:33 +01002076 seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
Chris Wilson23f4a282017-02-18 11:27:08 +00002077 rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
Chris Wilson60548c52018-07-31 14:26:29 +01002078 rps->power.down_threshold);
Chris Wilson7466c292016-08-15 09:49:33 +01002079 } else {
2080 seq_puts(m, "\nRPS Autotuning inactive\n");
2081 }
2082
Chris Wilson8d3afd72015-05-21 21:01:47 +01002083 return 0;
Chris Wilson1854d5c2015-04-07 16:20:32 +01002084}
2085
Ben Widawsky63573eb2013-07-04 11:02:07 -07002086static int i915_llc(struct seq_file *m, void *data)
2087{
David Weinehall36cdd012016-08-22 13:59:31 +03002088 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Mika Kuoppala3accaf72016-04-13 17:26:43 +03002089 const bool edram = INTEL_GEN(dev_priv) > 8;
Ben Widawsky63573eb2013-07-04 11:02:07 -07002090
David Weinehall36cdd012016-08-22 13:59:31 +03002091 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
Daniele Ceraolo Spuriof6ac9932019-03-28 10:45:32 -07002092 seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
2093 dev_priv->edram_size_mb);
Ben Widawsky63573eb2013-07-04 11:02:07 -07002094
2095 return 0;
2096}
2097
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002098static int i915_huc_load_status_info(struct seq_file *m, void *data)
2099{
2100 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00002101 intel_wakeref_t wakeref;
Michal Wajdeczko56ffc742017-10-17 09:44:49 +00002102 struct drm_printer p;
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002103
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002104 if (!HAS_HUC(dev_priv))
2105 return -ENODEV;
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002106
Michal Wajdeczko56ffc742017-10-17 09:44:49 +00002107 p = drm_seq_file_printer(m);
2108 intel_uc_fw_dump(&dev_priv->huc.fw, &p);
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002109
Chris Wilsond4225a52019-01-14 14:21:23 +00002110 with_intel_runtime_pm(dev_priv, wakeref)
2111 seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08002112
2113 return 0;
2114}
2115
Alex Daifdf5d352015-08-12 15:43:37 +01002116static int i915_guc_load_status_info(struct seq_file *m, void *data)
2117{
David Weinehall36cdd012016-08-22 13:59:31 +03002118 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00002119 intel_wakeref_t wakeref;
Michal Wajdeczko56ffc742017-10-17 09:44:49 +00002120 struct drm_printer p;
Alex Daifdf5d352015-08-12 15:43:37 +01002121
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002122 if (!HAS_GUC(dev_priv))
2123 return -ENODEV;
Alex Daifdf5d352015-08-12 15:43:37 +01002124
Michal Wajdeczko56ffc742017-10-17 09:44:49 +00002125 p = drm_seq_file_printer(m);
2126 intel_uc_fw_dump(&dev_priv->guc.fw, &p);
Alex Daifdf5d352015-08-12 15:43:37 +01002127
Chris Wilsond4225a52019-01-14 14:21:23 +00002128 with_intel_runtime_pm(dev_priv, wakeref) {
2129 u32 tmp = I915_READ(GUC_STATUS);
2130 u32 i;
sagar.a.kamble@intel.com3582ad12017-02-03 13:58:33 +05302131
Chris Wilsond4225a52019-01-14 14:21:23 +00002132 seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2133 seq_printf(m, "\tBootrom status = 0x%x\n",
2134 (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2135 seq_printf(m, "\tuKernel status = 0x%x\n",
2136 (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2137 seq_printf(m, "\tMIA Core status = 0x%x\n",
2138 (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2139 seq_puts(m, "\nScratch registers:\n");
2140 for (i = 0; i < 16; i++) {
2141 seq_printf(m, "\t%2d: \t0x%x\n",
2142 i, I915_READ(SOFT_SCRATCH(i)));
2143 }
2144 }
sagar.a.kamble@intel.com3582ad12017-02-03 13:58:33 +05302145
Alex Daifdf5d352015-08-12 15:43:37 +01002146 return 0;
2147}
2148
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01002149static const char *
2150stringify_guc_log_type(enum guc_log_buffer_type type)
2151{
2152 switch (type) {
2153 case GUC_ISR_LOG_BUFFER:
2154 return "ISR";
2155 case GUC_DPC_LOG_BUFFER:
2156 return "DPC";
2157 case GUC_CRASH_DUMP_LOG_BUFFER:
2158 return "CRASH";
2159 default:
2160 MISSING_CASE(type);
2161 }
2162
2163 return "";
2164}
2165
Akash Goel5aa1ee42016-10-12 21:54:36 +05302166static void i915_guc_log_info(struct seq_file *m,
2167 struct drm_i915_private *dev_priv)
2168{
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01002169 struct intel_guc_log *log = &dev_priv->guc.log;
2170 enum guc_log_buffer_type type;
2171
2172 if (!intel_guc_log_relay_enabled(log)) {
2173 seq_puts(m, "GuC log relay disabled\n");
2174 return;
2175 }
Akash Goel5aa1ee42016-10-12 21:54:36 +05302176
Michał Winiarskidb557992018-03-19 10:53:43 +01002177 seq_puts(m, "GuC logging stats:\n");
Akash Goel5aa1ee42016-10-12 21:54:36 +05302178
Michał Winiarski6a96be22018-03-19 10:53:42 +01002179 seq_printf(m, "\tRelay full count: %u\n",
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01002180 log->relay.full_count);
2181
2182 for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
2183 seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
2184 stringify_guc_log_type(type),
2185 log->stats[type].flush,
2186 log->stats[type].sampled_overflow);
2187 }
Akash Goel5aa1ee42016-10-12 21:54:36 +05302188}
2189
Dave Gordon8b417c22015-08-12 15:43:44 +01002190static void i915_guc_client_info(struct seq_file *m,
2191 struct drm_i915_private *dev_priv,
Sagar Arun Kamble5afc8b42017-11-16 19:02:40 +05302192 struct intel_guc_client *client)
Dave Gordon8b417c22015-08-12 15:43:44 +01002193{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002194 struct intel_engine_cs *engine;
Dave Gordonc18468c2016-08-09 15:19:22 +01002195 enum intel_engine_id id;
Jani Nikulae5315212019-01-16 11:15:23 +02002196 u64 tot = 0;
Dave Gordon8b417c22015-08-12 15:43:44 +01002197
Oscar Mateob09935a2017-03-22 10:39:53 -07002198 seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2199 client->priority, client->stage_id, client->proc_desc_offset);
Michał Winiarski59db36c2017-09-14 12:51:23 +02002200 seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2201 client->doorbell_id, client->doorbell_offset);
Dave Gordon8b417c22015-08-12 15:43:44 +01002202
Akash Goel3b3f1652016-10-13 22:44:48 +05302203 for_each_engine(engine, dev_priv, id) {
Dave Gordonc18468c2016-08-09 15:19:22 +01002204 u64 submissions = client->submissions[id];
2205 tot += submissions;
Dave Gordon8b417c22015-08-12 15:43:44 +01002206 seq_printf(m, "\tSubmissions: %llu %s\n",
Dave Gordonc18468c2016-08-09 15:19:22 +01002207 submissions, engine->name);
Dave Gordon8b417c22015-08-12 15:43:44 +01002208 }
2209 seq_printf(m, "\tTotal: %llu\n", tot);
2210}
2211
/*
 * i915_guc_info - top-level GuC debugfs entry
 *
 * Always prints the log relay stats (when GuC is used at all). When GuC
 * submission is enabled it additionally dumps the doorbell bitmap, the
 * next doorbell cacheline, and per-client info for the execbuf client and
 * the (optional) preempt client.
 *
 * Returns -ENODEV when GuC is not in use, 0 otherwise.
 */
static int i915_guc_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;

	if (!USES_GUC(dev_priv))
		return -ENODEV;

	i915_guc_log_info(m, dev_priv);

	/* the remaining state only exists with GuC submission */
	if (!USES_GUC_SUBMISSION(dev_priv))
		return 0;

	/* submission enabled implies an execbuf client was created */
	GEM_BUG_ON(!guc->execbuf_client);

	seq_printf(m, "\nDoorbell map:\n");
	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
	seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);

	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
	i915_guc_client_info(m, dev_priv, guc->execbuf_client);
	if (guc->preempt_client) {
		seq_printf(m, "\nGuC preempt client @ %p:\n",
			   guc->preempt_client);
		i915_guc_client_info(m, dev_priv, guc->preempt_client);
	}

	/* Add more as required ... */

	return 0;
}
2243
Oscar Mateoa8b93702017-05-10 15:04:51 +00002244static int i915_guc_stage_pool(struct seq_file *m, void *data)
Alex Dai4c7e77f2015-08-12 15:43:40 +01002245{
David Weinehall36cdd012016-08-22 13:59:31 +03002246 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Oscar Mateoa8b93702017-05-10 15:04:51 +00002247 const struct intel_guc *guc = &dev_priv->guc;
2248 struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
Sagar Arun Kamble5afc8b42017-11-16 19:02:40 +05302249 struct intel_guc_client *client = guc->execbuf_client;
Chris Wilson3a891a62019-04-01 17:26:39 +01002250 intel_engine_mask_t tmp;
Oscar Mateoa8b93702017-05-10 15:04:51 +00002251 int index;
Alex Dai4c7e77f2015-08-12 15:43:40 +01002252
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002253 if (!USES_GUC_SUBMISSION(dev_priv))
2254 return -ENODEV;
Alex Dai4c7e77f2015-08-12 15:43:40 +01002255
Oscar Mateoa8b93702017-05-10 15:04:51 +00002256 for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
2257 struct intel_engine_cs *engine;
Alex Dai4c7e77f2015-08-12 15:43:40 +01002258
Oscar Mateoa8b93702017-05-10 15:04:51 +00002259 if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
2260 continue;
Alex Dai4c7e77f2015-08-12 15:43:40 +01002261
Oscar Mateoa8b93702017-05-10 15:04:51 +00002262 seq_printf(m, "GuC stage descriptor %u:\n", index);
2263 seq_printf(m, "\tIndex: %u\n", desc->stage_id);
2264 seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
2265 seq_printf(m, "\tPriority: %d\n", desc->priority);
2266 seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
2267 seq_printf(m, "\tEngines used: 0x%x\n",
2268 desc->engines_used);
2269 seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
2270 desc->db_trigger_phy,
2271 desc->db_trigger_cpu,
2272 desc->db_trigger_uk);
2273 seq_printf(m, "\tProcess descriptor: 0x%x\n",
2274 desc->process_desc);
Colin Ian King9a094852017-05-16 10:22:35 +01002275 seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
Oscar Mateoa8b93702017-05-10 15:04:51 +00002276 desc->wq_addr, desc->wq_size);
2277 seq_putc(m, '\n');
2278
2279 for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
2280 u32 guc_engine_id = engine->guc_id;
2281 struct guc_execlist_context *lrc =
2282 &desc->lrc[guc_engine_id];
2283
2284 seq_printf(m, "\t%s LRC:\n", engine->name);
2285 seq_printf(m, "\t\tContext desc: 0x%x\n",
2286 lrc->context_desc);
2287 seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
2288 seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
2289 seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
2290 seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
2291 seq_putc(m, '\n');
2292 }
Alex Dai4c7e77f2015-08-12 15:43:40 +01002293 }
2294
Oscar Mateoa8b93702017-05-10 15:04:51 +00002295 return 0;
2296}
2297
Alex Dai4c7e77f2015-08-12 15:43:40 +01002298static int i915_guc_log_dump(struct seq_file *m, void *data)
2299{
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002300 struct drm_info_node *node = m->private;
2301 struct drm_i915_private *dev_priv = node_to_i915(node);
2302 bool dump_load_err = !!node->info_ent->data;
2303 struct drm_i915_gem_object *obj = NULL;
2304 u32 *log;
2305 int i = 0;
Alex Dai4c7e77f2015-08-12 15:43:40 +01002306
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002307 if (!HAS_GUC(dev_priv))
2308 return -ENODEV;
2309
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002310 if (dump_load_err)
2311 obj = dev_priv->guc.load_err_log;
2312 else if (dev_priv->guc.log.vma)
2313 obj = dev_priv->guc.log.vma->obj;
2314
2315 if (!obj)
Alex Dai4c7e77f2015-08-12 15:43:40 +01002316 return 0;
2317
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002318 log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2319 if (IS_ERR(log)) {
2320 DRM_DEBUG("Failed to pin object\n");
2321 seq_puts(m, "(log data unaccessible)\n");
2322 return PTR_ERR(log);
Alex Dai4c7e77f2015-08-12 15:43:40 +01002323 }
2324
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002325 for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2326 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2327 *(log + i), *(log + i + 1),
2328 *(log + i + 2), *(log + i + 3));
2329
Alex Dai4c7e77f2015-08-12 15:43:40 +01002330 seq_putc(m, '\n');
2331
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07002332 i915_gem_object_unpin_map(obj);
2333
Alex Dai4c7e77f2015-08-12 15:43:40 +01002334 return 0;
2335}
2336
Michał Winiarski4977a282018-03-19 10:53:40 +01002337static int i915_guc_log_level_get(void *data, u64 *val)
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302338{
Chris Wilsonbcc36d82017-04-07 20:42:20 +01002339 struct drm_i915_private *dev_priv = data;
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302340
Michał Winiarski86aa8242018-03-08 16:46:53 +01002341 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002342 return -ENODEV;
2343
Piotr Piórkowski50935ac2018-06-04 16:19:41 +02002344 *val = intel_guc_log_get_level(&dev_priv->guc.log);
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302345
2346 return 0;
2347}
2348
Michał Winiarski4977a282018-03-19 10:53:40 +01002349static int i915_guc_log_level_set(void *data, u64 val)
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302350{
Chris Wilsonbcc36d82017-04-07 20:42:20 +01002351 struct drm_i915_private *dev_priv = data;
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302352
Michał Winiarski86aa8242018-03-08 16:46:53 +01002353 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002354 return -ENODEV;
2355
Piotr Piórkowski50935ac2018-06-04 16:19:41 +02002356 return intel_guc_log_set_level(&dev_priv->guc.log, val);
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302357}
2358
Michał Winiarski4977a282018-03-19 10:53:40 +01002359DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
2360 i915_guc_log_level_get, i915_guc_log_level_set,
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302361 "%lld\n");
2362
Michał Winiarski4977a282018-03-19 10:53:40 +01002363static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2364{
2365 struct drm_i915_private *dev_priv = inode->i_private;
2366
2367 if (!USES_GUC(dev_priv))
2368 return -ENODEV;
2369
2370 file->private_data = &dev_priv->guc.log;
2371
2372 return intel_guc_log_relay_open(&dev_priv->guc.log);
2373}
2374
2375static ssize_t
2376i915_guc_log_relay_write(struct file *filp,
2377 const char __user *ubuf,
2378 size_t cnt,
2379 loff_t *ppos)
2380{
2381 struct intel_guc_log *log = filp->private_data;
2382
2383 intel_guc_log_relay_flush(log);
2384
2385 return cnt;
2386}
2387
2388static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2389{
2390 struct drm_i915_private *dev_priv = inode->i_private;
2391
2392 intel_guc_log_relay_close(&dev_priv->guc.log);
2393
2394 return 0;
2395}
2396
/* file_operations for the GuC log relay debugfs node */
static const struct file_operations i915_guc_log_relay_fops = {
	.owner = THIS_MODULE,
	.open = i915_guc_log_relay_open,
	.write = i915_guc_log_relay_write,
	.release = i915_guc_log_relay_release,
};
2403
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002404static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2405{
2406 u8 val;
2407 static const char * const sink_status[] = {
2408 "inactive",
2409 "transition to active, capture and display",
2410 "active, display from RFB",
2411 "active, capture and display on sink device timings",
2412 "transition to inactive, capture and display, timing re-sync",
2413 "reserved",
2414 "reserved",
2415 "sink internal error",
2416 };
2417 struct drm_connector *connector = m->private;
Rodrigo Vivi7a72c782018-07-19 17:31:55 -07002418 struct drm_i915_private *dev_priv = to_i915(connector->dev);
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002419 struct intel_dp *intel_dp =
2420 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
Rodrigo Vivi7a72c782018-07-19 17:31:55 -07002421 int ret;
2422
2423 if (!CAN_PSR(dev_priv)) {
2424 seq_puts(m, "PSR Unsupported\n");
2425 return -ENODEV;
2426 }
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002427
2428 if (connector->status != connector_status_connected)
2429 return -ENODEV;
2430
Rodrigo Vivi7a72c782018-07-19 17:31:55 -07002431 ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2432
2433 if (ret == 1) {
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002434 const char *str = "unknown";
2435
2436 val &= DP_PSR_SINK_STATE_MASK;
2437 if (val < ARRAY_SIZE(sink_status))
2438 str = sink_status[val];
2439 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2440 } else {
Rodrigo Vivi7a72c782018-07-19 17:31:55 -07002441 return ret;
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002442 }
2443
2444 return 0;
2445}
2446DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2447
Vathsala Nagaraju00b06292018-06-27 13:38:30 +05302448static void
2449psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
Chris Wilsonb86bef202017-01-16 13:06:21 +00002450{
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002451 u32 val, status_val;
2452 const char *status = "unknown";
Chris Wilsonb86bef202017-01-16 13:06:21 +00002453
Vathsala Nagaraju00b06292018-06-27 13:38:30 +05302454 if (dev_priv->psr.psr2_enabled) {
2455 static const char * const live_status[] = {
2456 "IDLE",
2457 "CAPTURE",
2458 "CAPTURE_FS",
2459 "SLEEP",
2460 "BUFON_FW",
2461 "ML_UP",
2462 "SU_STANDBY",
2463 "FAST_SLEEP",
2464 "DEEP_SLEEP",
2465 "BUF_ON",
2466 "TG_ON"
2467 };
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002468 val = I915_READ(EDP_PSR2_STATUS);
2469 status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
2470 EDP_PSR2_STATUS_STATE_SHIFT;
2471 if (status_val < ARRAY_SIZE(live_status))
2472 status = live_status[status_val];
Vathsala Nagaraju00b06292018-06-27 13:38:30 +05302473 } else {
2474 static const char * const live_status[] = {
2475 "IDLE",
2476 "SRDONACK",
2477 "SRDENT",
2478 "BUFOFF",
2479 "BUFON",
2480 "AUXACK",
2481 "SRDOFFACK",
2482 "SRDENT_ON",
2483 };
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002484 val = I915_READ(EDP_PSR_STATUS);
2485 status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
2486 EDP_PSR_STATUS_STATE_SHIFT;
2487 if (status_val < ARRAY_SIZE(live_status))
2488 status = live_status[status_val];
Vathsala Nagaraju00b06292018-06-27 13:38:30 +05302489 }
Chris Wilsonb86bef202017-01-16 13:06:21 +00002490
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002491 seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
Chris Wilsonb86bef202017-01-16 13:06:21 +00002492}
2493
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03002494static int i915_edp_psr_status(struct seq_file *m, void *data)
2495{
David Weinehall36cdd012016-08-22 13:59:31 +03002496 struct drm_i915_private *dev_priv = node_to_i915(m->private);
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002497 struct i915_psr *psr = &dev_priv->psr;
Chris Wilsona0371212019-01-14 14:21:14 +00002498 intel_wakeref_t wakeref;
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002499 const char *status;
2500 bool enabled;
2501 u32 val;
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03002502
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002503 if (!HAS_PSR(dev_priv))
2504 return -ENODEV;
Damien Lespiau3553a8e2015-03-09 14:17:58 +00002505
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002506 seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
2507 if (psr->dp)
2508 seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
2509 seq_puts(m, "\n");
2510
2511 if (!psr->sink_support)
Dhinakaran Pandiyanc9ef2912018-01-03 13:38:24 -08002512 return 0;
2513
Chris Wilsona0371212019-01-14 14:21:14 +00002514 wakeref = intel_runtime_pm_get(dev_priv);
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002515 mutex_lock(&psr->lock);
Paulo Zanonic8c8fb32013-11-27 18:21:54 -02002516
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002517 if (psr->enabled)
2518 status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
Dhinakaran Pandiyance3508f2018-05-11 16:00:59 -07002519 else
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002520 status = "disabled";
2521 seq_printf(m, "PSR mode: %s\n", status);
Rodrigo Vivi60e5ffe2016-02-01 12:02:07 -08002522
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002523 if (!psr->enabled)
2524 goto unlock;
Rodrigo Vivi60e5ffe2016-02-01 12:02:07 -08002525
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002526 if (psr->psr2_enabled) {
2527 val = I915_READ(EDP_PSR2_CTL);
2528 enabled = val & EDP_PSR2_ENABLE;
2529 } else {
2530 val = I915_READ(EDP_PSR_CTL);
2531 enabled = val & EDP_PSR_ENABLE;
2532 }
2533 seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
2534 enableddisabled(enabled), val);
2535 psr_source_status(dev_priv, m);
2536 seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
2537 psr->busy_frontbuffer_bits);
Rodrigo Vivia6cbdb82014-11-14 08:52:40 -08002538
Rodrigo Vivi05eec3c2015-11-23 14:16:40 -08002539 /*
Rodrigo Vivi05eec3c2015-11-23 14:16:40 -08002540 * SKL+ Perf counter is reset to 0 everytime DC state is entered
2541 */
David Weinehall36cdd012016-08-22 13:59:31 +03002542 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002543 val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
2544 seq_printf(m, "Performance counter: %u\n", val);
Rodrigo Vivia6cbdb82014-11-14 08:52:40 -08002545 }
Nagaraju, Vathsala6ba1f9e2017-01-06 22:02:32 +05302546
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002547 if (psr->debug & I915_PSR_DEBUG_IRQ) {
Dhinakaran Pandiyan3f983e542018-04-03 14:24:20 -07002548 seq_printf(m, "Last attempted entry at: %lld\n",
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002549 psr->last_entry_attempt);
2550 seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
Dhinakaran Pandiyan3f983e542018-04-03 14:24:20 -07002551 }
2552
José Roberto de Souzaa81f7812019-01-17 12:55:48 -08002553 if (psr->psr2_enabled) {
2554 u32 su_frames_val[3];
2555 int frame;
2556
2557 /*
2558 * Reading all 3 registers before hand to minimize crossing a
2559 * frame boundary between register reads
2560 */
2561 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
2562 su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));
2563
2564 seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
2565
2566 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
2567 u32 su_blocks;
2568
2569 su_blocks = su_frames_val[frame / 3] &
2570 PSR2_SU_STATUS_MASK(frame);
2571 su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
2572 seq_printf(m, "%d\t%d\n", frame, su_blocks);
2573 }
2574 }
2575
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002576unlock:
2577 mutex_unlock(&psr->lock);
Chris Wilsona0371212019-01-14 14:21:14 +00002578 intel_runtime_pm_put(dev_priv, wakeref);
José Roberto de Souza47c6cd52019-01-17 12:55:46 -08002579
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03002580 return 0;
2581}
2582
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002583static int
2584i915_edp_psr_debug_set(void *data, u64 val)
2585{
2586 struct drm_i915_private *dev_priv = data;
Chris Wilsona0371212019-01-14 14:21:14 +00002587 intel_wakeref_t wakeref;
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002588 int ret;
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002589
2590 if (!CAN_PSR(dev_priv))
2591 return -ENODEV;
2592
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002593 DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002594
Chris Wilsona0371212019-01-14 14:21:14 +00002595 wakeref = intel_runtime_pm_get(dev_priv);
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002596
José Roberto de Souza23ec9f52019-02-06 13:18:45 -08002597 ret = intel_psr_debug_set(dev_priv, val);
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002598
Chris Wilsona0371212019-01-14 14:21:14 +00002599 intel_runtime_pm_put(dev_priv, wakeref);
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002600
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002601 return ret;
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002602}
2603
2604static int
2605i915_edp_psr_debug_get(void *data, u64 *val)
2606{
2607 struct drm_i915_private *dev_priv = data;
2608
2609 if (!CAN_PSR(dev_priv))
2610 return -ENODEV;
2611
2612 *val = READ_ONCE(dev_priv->psr.debug);
2613 return 0;
2614}
2615
/*
 * File operations for the i915_edp_psr_debug attribute, wiring the
 * get/set handlers above; the value is formatted as decimal ("%llu\n").
 */
DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");
2619
Jesse Barnesec013e72013-08-20 10:29:23 +01002620static int i915_energy_uJ(struct seq_file *m, void *data)
2621{
David Weinehall36cdd012016-08-22 13:59:31 +03002622 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Gabriel Krisman Bertazid38014e2017-07-26 02:30:16 -03002623 unsigned long long power;
Chris Wilsona0371212019-01-14 14:21:14 +00002624 intel_wakeref_t wakeref;
Jesse Barnesec013e72013-08-20 10:29:23 +01002625 u32 units;
2626
David Weinehall36cdd012016-08-22 13:59:31 +03002627 if (INTEL_GEN(dev_priv) < 6)
Jesse Barnesec013e72013-08-20 10:29:23 +01002628 return -ENODEV;
2629
Chris Wilsond4225a52019-01-14 14:21:23 +00002630 if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
Gabriel Krisman Bertazid38014e2017-07-26 02:30:16 -03002631 return -ENODEV;
Gabriel Krisman Bertazid38014e2017-07-26 02:30:16 -03002632
2633 units = (power & 0x1f00) >> 8;
Chris Wilsond4225a52019-01-14 14:21:23 +00002634 with_intel_runtime_pm(dev_priv, wakeref)
2635 power = I915_READ(MCH_SECP_NRG_STTS);
2636
Gabriel Krisman Bertazid38014e2017-07-26 02:30:16 -03002637 power = (1000000 * power) >> units; /* convert to uJ */
Gabriel Krisman Bertazid38014e2017-07-26 02:30:16 -03002638 seq_printf(m, "%llu", power);
Paulo Zanoni371db662013-08-19 13:18:10 -03002639
2640 return 0;
2641}
2642
Damien Lespiau6455c872015-06-04 18:23:57 +01002643static int i915_runtime_pm_status(struct seq_file *m, void *unused)
Paulo Zanoni371db662013-08-19 13:18:10 -03002644{
David Weinehall36cdd012016-08-22 13:59:31 +03002645 struct drm_i915_private *dev_priv = node_to_i915(m->private);
David Weinehall52a05c32016-08-22 13:32:44 +03002646 struct pci_dev *pdev = dev_priv->drm.pdev;
Paulo Zanoni371db662013-08-19 13:18:10 -03002647
Chris Wilsona156e642016-04-03 14:14:21 +01002648 if (!HAS_RUNTIME_PM(dev_priv))
2649 seq_puts(m, "Runtime power management not supported\n");
Paulo Zanoni371db662013-08-19 13:18:10 -03002650
Chris Wilson25c896bd2019-01-14 14:21:25 +00002651 seq_printf(m, "Runtime power status: %s\n",
2652 enableddisabled(!dev_priv->power_domains.wakeref));
2653
Chris Wilsond9948a12019-02-28 10:20:35 +00002654 seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
Paulo Zanoni371db662013-08-19 13:18:10 -03002655 seq_printf(m, "IRQs disabled: %s\n",
Jesse Barnes9df7575f2014-06-20 09:29:20 -07002656 yesno(!intel_irqs_enabled(dev_priv)));
Chris Wilson0d804182015-06-15 12:52:28 +01002657#ifdef CONFIG_PM
Damien Lespiaua6aaec82015-06-04 18:23:58 +01002658 seq_printf(m, "Usage count: %d\n",
David Weinehall36cdd012016-08-22 13:59:31 +03002659 atomic_read(&dev_priv->drm.dev->power.usage_count));
Chris Wilson0d804182015-06-15 12:52:28 +01002660#else
2661 seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2662#endif
Chris Wilsona156e642016-04-03 14:14:21 +01002663 seq_printf(m, "PCI device power state: %s [%d]\n",
David Weinehall52a05c32016-08-22 13:32:44 +03002664 pci_power_name(pdev->current_state),
2665 pdev->current_state);
Paulo Zanoni371db662013-08-19 13:18:10 -03002666
Chris Wilsonbd780f32019-01-14 14:21:09 +00002667 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
2668 struct drm_printer p = drm_seq_file_printer(m);
2669
2670 print_intel_runtime_pm_wakeref(dev_priv, &p);
2671 }
2672
Jesse Barnesec013e72013-08-20 10:29:23 +01002673 return 0;
2674}
2675
Imre Deak1da51582013-11-25 17:15:35 +02002676static int i915_power_domain_info(struct seq_file *m, void *unused)
2677{
David Weinehall36cdd012016-08-22 13:59:31 +03002678 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Imre Deak1da51582013-11-25 17:15:35 +02002679 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2680 int i;
2681
2682 mutex_lock(&power_domains->lock);
2683
2684 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2685 for (i = 0; i < power_domains->power_well_count; i++) {
2686 struct i915_power_well *power_well;
2687 enum intel_display_power_domain power_domain;
2688
2689 power_well = &power_domains->power_wells[i];
Imre Deakf28ec6f2018-08-06 12:58:37 +03002690 seq_printf(m, "%-25s %d\n", power_well->desc->name,
Imre Deak1da51582013-11-25 17:15:35 +02002691 power_well->count);
2692
Imre Deakf28ec6f2018-08-06 12:58:37 +03002693 for_each_power_domain(power_domain, power_well->desc->domains)
Imre Deak1da51582013-11-25 17:15:35 +02002694 seq_printf(m, " %-23s %d\n",
Daniel Stone9895ad02015-11-20 15:55:33 +00002695 intel_display_power_domain_str(power_domain),
Imre Deak1da51582013-11-25 17:15:35 +02002696 power_domains->domain_use_count[power_domain]);
Imre Deak1da51582013-11-25 17:15:35 +02002697 }
2698
2699 mutex_unlock(&power_domains->lock);
2700
2701 return 0;
2702}
2703
Damien Lespiaub7cec662015-10-27 14:47:01 +02002704static int i915_dmc_info(struct seq_file *m, void *unused)
2705{
David Weinehall36cdd012016-08-22 13:59:31 +03002706 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00002707 intel_wakeref_t wakeref;
Damien Lespiaub7cec662015-10-27 14:47:01 +02002708 struct intel_csr *csr;
2709
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002710 if (!HAS_CSR(dev_priv))
2711 return -ENODEV;
Damien Lespiaub7cec662015-10-27 14:47:01 +02002712
2713 csr = &dev_priv->csr;
2714
Chris Wilsona0371212019-01-14 14:21:14 +00002715 wakeref = intel_runtime_pm_get(dev_priv);
Mika Kuoppala6fb403d2015-10-30 17:54:47 +02002716
Damien Lespiaub7cec662015-10-27 14:47:01 +02002717 seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2718 seq_printf(m, "path: %s\n", csr->fw_path);
2719
2720 if (!csr->dmc_payload)
Mika Kuoppala6fb403d2015-10-30 17:54:47 +02002721 goto out;
Damien Lespiaub7cec662015-10-27 14:47:01 +02002722
2723 seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2724 CSR_VERSION_MINOR(csr->version));
2725
Imre Deak34b2f8d2018-10-31 22:02:20 +02002726 if (WARN_ON(INTEL_GEN(dev_priv) > 11))
2727 goto out;
2728
2729 seq_printf(m, "DC3 -> DC5 count: %d\n",
2730 I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
2731 SKL_CSR_DC3_DC5_COUNT));
2732 if (!IS_GEN9_LP(dev_priv))
Damien Lespiau83372062015-10-30 17:53:32 +02002733 seq_printf(m, "DC5 -> DC6 count: %d\n",
2734 I915_READ(SKL_CSR_DC5_DC6_COUNT));
Damien Lespiau83372062015-10-30 17:53:32 +02002735
Mika Kuoppala6fb403d2015-10-30 17:54:47 +02002736out:
2737 seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2738 seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2739 seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2740
Chris Wilsona0371212019-01-14 14:21:14 +00002741 intel_runtime_pm_put(dev_priv, wakeref);
Damien Lespiau83372062015-10-30 17:53:32 +02002742
Damien Lespiaub7cec662015-10-27 14:47:01 +02002743 return 0;
2744}
2745
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002746static void intel_seq_print_mode(struct seq_file *m, int tabs,
2747 struct drm_display_mode *mode)
2748{
2749 int i;
2750
2751 for (i = 0; i < tabs; i++)
2752 seq_putc(m, '\t');
2753
Shayenne Moura4fb6bb82018-12-20 10:27:57 -02002754 seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002755}
2756
2757static void intel_encoder_info(struct seq_file *m,
2758 struct intel_crtc *intel_crtc,
2759 struct intel_encoder *intel_encoder)
2760{
David Weinehall36cdd012016-08-22 13:59:31 +03002761 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2762 struct drm_device *dev = &dev_priv->drm;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002763 struct drm_crtc *crtc = &intel_crtc->base;
2764 struct intel_connector *intel_connector;
2765 struct drm_encoder *encoder;
2766
2767 encoder = &intel_encoder->base;
2768 seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
Jani Nikula8e329a032014-06-03 14:56:21 +03002769 encoder->base.id, encoder->name);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002770 for_each_connector_on_encoder(dev, encoder, intel_connector) {
2771 struct drm_connector *connector = &intel_connector->base;
2772 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2773 connector->base.id,
Jani Nikulac23cc412014-06-03 14:56:17 +03002774 connector->name,
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002775 drm_get_connector_status_name(connector->status));
2776 if (connector->status == connector_status_connected) {
2777 struct drm_display_mode *mode = &crtc->mode;
2778 seq_printf(m, ", mode:\n");
2779 intel_seq_print_mode(m, 2, mode);
2780 } else {
2781 seq_putc(m, '\n');
2782 }
2783 }
2784}
2785
2786static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2787{
David Weinehall36cdd012016-08-22 13:59:31 +03002788 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2789 struct drm_device *dev = &dev_priv->drm;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002790 struct drm_crtc *crtc = &intel_crtc->base;
2791 struct intel_encoder *intel_encoder;
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02002792 struct drm_plane_state *plane_state = crtc->primary->state;
2793 struct drm_framebuffer *fb = plane_state->fb;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002794
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02002795 if (fb)
Matt Roper5aa8a932014-06-16 10:12:55 -07002796 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02002797 fb->base.id, plane_state->src_x >> 16,
2798 plane_state->src_y >> 16, fb->width, fb->height);
Matt Roper5aa8a932014-06-16 10:12:55 -07002799 else
2800 seq_puts(m, "\tprimary plane disabled\n");
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002801 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2802 intel_encoder_info(m, intel_crtc, intel_encoder);
2803}
2804
2805static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2806{
2807 struct drm_display_mode *mode = panel->fixed_mode;
2808
2809 seq_printf(m, "\tfixed mode:\n");
2810 intel_seq_print_mode(m, 2, mode);
2811}
2812
2813static void intel_dp_info(struct seq_file *m,
2814 struct intel_connector *intel_connector)
2815{
2816 struct intel_encoder *intel_encoder = intel_connector->encoder;
2817 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2818
2819 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
Jani Nikula742f4912015-09-03 11:16:09 +03002820 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02002821 if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002822 intel_panel_info(m, &intel_connector->panel);
Mika Kahola80209e52016-09-09 14:10:57 +03002823
2824 drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2825 &intel_dp->aux);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002826}
2827
Libin Yang9a148a92016-11-28 20:07:05 +08002828static void intel_dp_mst_info(struct seq_file *m,
2829 struct intel_connector *intel_connector)
2830{
2831 struct intel_encoder *intel_encoder = intel_connector->encoder;
2832 struct intel_dp_mst_encoder *intel_mst =
2833 enc_to_mst(&intel_encoder->base);
2834 struct intel_digital_port *intel_dig_port = intel_mst->primary;
2835 struct intel_dp *intel_dp = &intel_dig_port->dp;
2836 bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2837 intel_connector->port);
2838
2839 seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2840}
2841
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002842static void intel_hdmi_info(struct seq_file *m,
2843 struct intel_connector *intel_connector)
2844{
2845 struct intel_encoder *intel_encoder = intel_connector->encoder;
2846 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2847
Jani Nikula742f4912015-09-03 11:16:09 +03002848 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002849}
2850
/* Print the LVDS connector sub-info: just the fixed panel mode. */
static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
2856
2857static void intel_connector_info(struct seq_file *m,
2858 struct drm_connector *connector)
2859{
2860 struct intel_connector *intel_connector = to_intel_connector(connector);
2861 struct intel_encoder *intel_encoder = intel_connector->encoder;
Jesse Barnesf103fc72014-02-20 12:39:57 -08002862 struct drm_display_mode *mode;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002863
2864 seq_printf(m, "connector %d: type %s, status: %s\n",
Jani Nikulac23cc412014-06-03 14:56:17 +03002865 connector->base.id, connector->name,
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002866 drm_get_connector_status_name(connector->status));
José Roberto de Souza3e037f92018-10-30 14:57:46 -07002867
2868 if (connector->status == connector_status_disconnected)
2869 return;
2870
José Roberto de Souza3e037f92018-10-30 14:57:46 -07002871 seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2872 connector->display_info.width_mm,
2873 connector->display_info.height_mm);
2874 seq_printf(m, "\tsubpixel order: %s\n",
2875 drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2876 seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
Maarten Lankhorstee648a72016-06-21 12:00:38 +02002877
Maarten Lankhorst77d1f612017-06-26 10:33:49 +02002878 if (!intel_encoder)
Maarten Lankhorstee648a72016-06-21 12:00:38 +02002879 return;
2880
2881 switch (connector->connector_type) {
2882 case DRM_MODE_CONNECTOR_DisplayPort:
2883 case DRM_MODE_CONNECTOR_eDP:
Libin Yang9a148a92016-11-28 20:07:05 +08002884 if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
2885 intel_dp_mst_info(m, intel_connector);
2886 else
2887 intel_dp_info(m, intel_connector);
Maarten Lankhorstee648a72016-06-21 12:00:38 +02002888 break;
2889 case DRM_MODE_CONNECTOR_LVDS:
2890 if (intel_encoder->type == INTEL_OUTPUT_LVDS)
Dave Airlie36cd7442014-05-02 13:44:18 +10002891 intel_lvds_info(m, intel_connector);
Maarten Lankhorstee648a72016-06-21 12:00:38 +02002892 break;
2893 case DRM_MODE_CONNECTOR_HDMIA:
2894 if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
Ville Syrjälä7e732ca2017-10-27 22:31:24 +03002895 intel_encoder->type == INTEL_OUTPUT_DDI)
Maarten Lankhorstee648a72016-06-21 12:00:38 +02002896 intel_hdmi_info(m, intel_connector);
2897 break;
2898 default:
2899 break;
Dave Airlie36cd7442014-05-02 13:44:18 +10002900 }
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002901
Jesse Barnesf103fc72014-02-20 12:39:57 -08002902 seq_printf(m, "\tmodes:\n");
2903 list_for_each_entry(mode, &connector->modes, head)
2904 intel_seq_print_mode(m, 2, mode);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002905}
2906
Robert Fekete3abc4e02015-10-27 16:58:32 +01002907static const char *plane_type(enum drm_plane_type type)
2908{
2909 switch (type) {
2910 case DRM_PLANE_TYPE_OVERLAY:
2911 return "OVL";
2912 case DRM_PLANE_TYPE_PRIMARY:
2913 return "PRI";
2914 case DRM_PLANE_TYPE_CURSOR:
2915 return "CUR";
2916 /*
2917 * Deliberately omitting default: to generate compiler warnings
2918 * when a new drm_plane_type gets added.
2919 */
2920 }
2921
2922 return "unknown";
2923}
2924
Jani Nikula5852a152019-01-07 16:51:49 +02002925static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
Robert Fekete3abc4e02015-10-27 16:58:32 +01002926{
Robert Fekete3abc4e02015-10-27 16:58:32 +01002927 /*
Robert Fossc2c446a2017-05-19 16:50:17 -04002928 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
Robert Fekete3abc4e02015-10-27 16:58:32 +01002929 * will print them all to visualize if the values are misused
2930 */
Jani Nikula5852a152019-01-07 16:51:49 +02002931 snprintf(buf, bufsize,
Robert Fekete3abc4e02015-10-27 16:58:32 +01002932 "%s%s%s%s%s%s(0x%08x)",
Robert Fossc2c446a2017-05-19 16:50:17 -04002933 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
2934 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
2935 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
2936 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
2937 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
2938 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
Robert Fekete3abc4e02015-10-27 16:58:32 +01002939 rotation);
Robert Fekete3abc4e02015-10-27 16:58:32 +01002940}
2941
/*
 * Dump every plane attached to @intel_crtc: id, type, CRTC and source
 * rectangles, framebuffer format and rotation flags.
 */
static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_plane *intel_plane;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane_state *state;
		struct drm_plane *plane = &intel_plane->base;
		struct drm_format_name_buf format_name;
		char rot_str[48];

		if (!plane->state) {
			seq_puts(m, "plane->state is NULL!\n");
			continue;
		}

		state = plane->state;

		if (state->fb) {
			drm_get_format_name(state->fb->format->format,
					    &format_name);
		} else {
			sprintf(format_name.str, "N/A");
		}

		plane_rotation(rot_str, sizeof(rot_str), state->rotation);

		/*
		 * src_* coordinates are 16.16 fixed point; the fractional
		 * part is scaled to 4 decimal digits via *15625 >> 10
		 * (== *10000 >> 16 without overflowing 32 bits).
		 */
		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
			   plane->base.id,
			   plane_type(intel_plane->base.type),
			   state->crtc_x, state->crtc_y,
			   state->crtc_w, state->crtc_h,
			   (state->src_x >> 16),
			   ((state->src_x & 0xffff) * 15625) >> 10,
			   (state->src_y >> 16),
			   ((state->src_y & 0xffff) * 15625) >> 10,
			   (state->src_w >> 16),
			   ((state->src_w & 0xffff) * 15625) >> 10,
			   (state->src_h >> 16),
			   ((state->src_h & 0xffff) * 15625) >> 10,
			   format_name.str,
			   rot_str);
	}
}
2987
/*
 * Print the scaler state of @intel_crtc: the number of scalers, the
 * claimed-user mask and active scaler id, and each scaler's in-use flag
 * and mode.
 */
static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct intel_crtc_state *pipe_config;
	int num_scalers = intel_crtc->num_scalers;
	int i;

	pipe_config = to_intel_crtc_state(intel_crtc->base.state);

	/* Not all platforms have a scaler */
	if (num_scalers) {
		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
			   num_scalers,
			   pipe_config->scaler_state.scaler_users,
			   pipe_config->scaler_state.scaler_id);

		for (i = 0; i < num_scalers; i++) {
			struct intel_scaler *sc =
				&pipe_config->scaler_state.scalers[i];

			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
				   i, yesno(sc->in_use), sc->mode);
		}
		seq_puts(m, "\n");
	} else {
		seq_puts(m, "\tNo scalers available on this platform\n");
	}
}
3015
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003016static int i915_display_info(struct seq_file *m, void *unused)
3017{
David Weinehall36cdd012016-08-22 13:59:31 +03003018 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3019 struct drm_device *dev = &dev_priv->drm;
Chris Wilson065f2ec22014-03-12 09:13:13 +00003020 struct intel_crtc *crtc;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003021 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003022 struct drm_connector_list_iter conn_iter;
Chris Wilsona0371212019-01-14 14:21:14 +00003023 intel_wakeref_t wakeref;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003024
Chris Wilsona0371212019-01-14 14:21:14 +00003025 wakeref = intel_runtime_pm_get(dev_priv);
3026
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003027 seq_printf(m, "CRTC info\n");
3028 seq_printf(m, "---------\n");
Damien Lespiaud3fcc802014-05-13 23:32:22 +01003029 for_each_intel_crtc(dev, crtc) {
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003030 struct intel_crtc_state *pipe_config;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003031
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003032 drm_modeset_lock(&crtc->base.mutex, NULL);
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003033 pipe_config = to_intel_crtc_state(crtc->base.state);
3034
Robert Fekete3abc4e02015-10-27 16:58:32 +01003035 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
Chris Wilson065f2ec22014-03-12 09:13:13 +00003036 crtc->base.base.id, pipe_name(crtc->pipe),
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003037 yesno(pipe_config->base.active),
Robert Fekete3abc4e02015-10-27 16:58:32 +01003038 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3039 yesno(pipe_config->dither), pipe_config->pipe_bpp);
3040
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003041 if (pipe_config->base.active) {
Ville Syrjäläcd5dcbf2017-03-27 21:55:35 +03003042 struct intel_plane *cursor =
3043 to_intel_plane(crtc->base.cursor);
3044
Chris Wilson065f2ec22014-03-12 09:13:13 +00003045 intel_crtc_info(m, crtc);
3046
Ville Syrjäläcd5dcbf2017-03-27 21:55:35 +03003047 seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
3048 yesno(cursor->base.state->visible),
3049 cursor->base.state->crtc_x,
3050 cursor->base.state->crtc_y,
3051 cursor->base.state->crtc_w,
3052 cursor->base.state->crtc_h,
3053 cursor->cursor.base);
Robert Fekete3abc4e02015-10-27 16:58:32 +01003054 intel_scaler_info(m, crtc);
3055 intel_plane_info(m, crtc);
Paulo Zanonia23dc652014-04-01 14:55:11 -03003056 }
Daniel Vettercace8412014-05-22 17:56:31 +02003057
3058 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
3059 yesno(!crtc->cpu_fifo_underrun_disabled),
3060 yesno(!crtc->pch_fifo_underrun_disabled));
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003061 drm_modeset_unlock(&crtc->base.mutex);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003062 }
3063
3064 seq_printf(m, "\n");
3065 seq_printf(m, "Connector info\n");
3066 seq_printf(m, "--------------\n");
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003067 mutex_lock(&dev->mode_config.mutex);
3068 drm_connector_list_iter_begin(dev, &conn_iter);
3069 drm_for_each_connector_iter(connector, &conn_iter)
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003070 intel_connector_info(m, connector);
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003071 drm_connector_list_iter_end(&conn_iter);
3072 mutex_unlock(&dev->mode_config.mutex);
3073
Chris Wilsona0371212019-01-14 14:21:14 +00003074 intel_runtime_pm_put(dev_priv, wakeref);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08003075
3076 return 0;
3077}
3078
Chris Wilson1b365952016-10-04 21:11:31 +01003079static int i915_engine_info(struct seq_file *m, void *unused)
3080{
3081 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3082 struct intel_engine_cs *engine;
Chris Wilsona0371212019-01-14 14:21:14 +00003083 intel_wakeref_t wakeref;
Akash Goel3b3f1652016-10-13 22:44:48 +05303084 enum intel_engine_id id;
Chris Wilsonf636edb2017-10-09 12:02:57 +01003085 struct drm_printer p;
Chris Wilson1b365952016-10-04 21:11:31 +01003086
Chris Wilsona0371212019-01-14 14:21:14 +00003087 wakeref = intel_runtime_pm_get(dev_priv);
Chris Wilson9c870d02016-10-24 13:42:15 +01003088
Chris Wilson79ffac852019-04-24 21:07:17 +01003089 seq_printf(m, "GT awake? %s [%d]\n",
3090 yesno(dev_priv->gt.awake),
3091 atomic_read(&dev_priv->gt.wakeref.count));
Lionel Landwerlinf577a032017-11-13 23:34:53 +00003092 seq_printf(m, "CS timestamp frequency: %u kHz\n",
Jani Nikula02584042018-12-31 16:56:41 +02003093 RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
Chris Wilsonf73b5672017-03-02 15:03:56 +00003094
Chris Wilsonf636edb2017-10-09 12:02:57 +01003095 p = drm_seq_file_printer(m);
3096 for_each_engine(engine, dev_priv, id)
Chris Wilson0db18b12017-12-08 01:23:00 +00003097 intel_engine_dump(engine, &p, "%s\n", engine->name);
Chris Wilson1b365952016-10-04 21:11:31 +01003098
Chris Wilsona0371212019-01-14 14:21:14 +00003099 intel_runtime_pm_put(dev_priv, wakeref);
Chris Wilson9c870d02016-10-24 13:42:15 +01003100
Chris Wilson1b365952016-10-04 21:11:31 +01003101 return 0;
3102}
3103
Lionel Landwerlin79e9cd52018-03-06 12:28:54 +00003104static int i915_rcs_topology(struct seq_file *m, void *unused)
3105{
3106 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3107 struct drm_printer p = drm_seq_file_printer(m);
3108
Jani Nikula02584042018-12-31 16:56:41 +02003109 intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
Lionel Landwerlin79e9cd52018-03-06 12:28:54 +00003110
3111 return 0;
3112}
3113
Chris Wilsonc5418a82017-10-13 21:26:19 +01003114static int i915_shrinker_info(struct seq_file *m, void *unused)
3115{
3116 struct drm_i915_private *i915 = node_to_i915(m->private);
3117
3118 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3119 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3120
3121 return 0;
3122}
3123
Daniel Vetter728e29d2014-06-25 22:01:53 +03003124static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3125{
David Weinehall36cdd012016-08-22 13:59:31 +03003126 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3127 struct drm_device *dev = &dev_priv->drm;
Daniel Vetter728e29d2014-06-25 22:01:53 +03003128 int i;
3129
3130 drm_modeset_lock_all(dev);
3131 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3132 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
3133
Lucas De Marchi72f775f2018-03-20 15:06:34 -07003134 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
Lucas De Marchi0823eb92018-03-20 15:06:35 -07003135 pll->info->id);
Maarten Lankhorst2dd66ebd2016-03-14 09:27:52 +01003136 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
Ander Conselvan de Oliveira2c42e532016-12-29 17:22:09 +02003137 pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
Daniel Vetter728e29d2014-06-25 22:01:53 +03003138 seq_printf(m, " tracked hardware state:\n");
Ander Conselvan de Oliveira2c42e532016-12-29 17:22:09 +02003139 seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
Ander Conselvan de Oliveira3e369b72014-10-29 11:32:32 +02003140 seq_printf(m, " dpll_md: 0x%08x\n",
Ander Conselvan de Oliveira2c42e532016-12-29 17:22:09 +02003141 pll->state.hw_state.dpll_md);
3142 seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
3143 seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
3144 seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
Paulo Zanonic27e9172018-04-27 16:14:36 -07003145 seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
3146 seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
3147 seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
3148 pll->state.hw_state.mg_refclkin_ctl);
3149 seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
3150 pll->state.hw_state.mg_clktop2_coreclkctl1);
3151 seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
3152 pll->state.hw_state.mg_clktop2_hsclkctl);
3153 seq_printf(m, " mg_pll_div0: 0x%08x\n",
3154 pll->state.hw_state.mg_pll_div0);
3155 seq_printf(m, " mg_pll_div1: 0x%08x\n",
3156 pll->state.hw_state.mg_pll_div1);
3157 seq_printf(m, " mg_pll_lf: 0x%08x\n",
3158 pll->state.hw_state.mg_pll_lf);
3159 seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
3160 pll->state.hw_state.mg_pll_frac_lock);
3161 seq_printf(m, " mg_pll_ssc: 0x%08x\n",
3162 pll->state.hw_state.mg_pll_ssc);
3163 seq_printf(m, " mg_pll_bias: 0x%08x\n",
3164 pll->state.hw_state.mg_pll_bias);
3165 seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
3166 pll->state.hw_state.mg_pll_tdc_coldst_bias);
Daniel Vetter728e29d2014-06-25 22:01:53 +03003167 }
3168 drm_modeset_unlock_all(dev);
3169
3170 return 0;
3171}
3172
Damien Lespiau1ed1ef92014-08-30 16:50:59 +01003173static int i915_wa_registers(struct seq_file *m, void *unused)
Arun Siluvery888b5992014-08-26 14:44:51 +01003174{
Tvrtko Ursulin452420d2018-12-03 13:33:57 +00003175 struct drm_i915_private *i915 = node_to_i915(m->private);
Chris Wilson8a68d462019-03-05 18:03:30 +00003176 const struct i915_wa_list *wal = &i915->engine[RCS0]->ctx_wa_list;
Tvrtko Ursulin452420d2018-12-03 13:33:57 +00003177 struct i915_wa *wa;
3178 unsigned int i;
Arun Siluvery888b5992014-08-26 14:44:51 +01003179
Tvrtko Ursulin452420d2018-12-03 13:33:57 +00003180 seq_printf(m, "Workarounds applied: %u\n", wal->count);
3181 for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
Chris Wilson548764b2018-06-15 13:02:07 +01003182 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
Tvrtko Ursulin452420d2018-12-03 13:33:57 +00003183 i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);
Arun Siluvery888b5992014-08-26 14:44:51 +01003184
3185 return 0;
3186}
3187
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05303188static int i915_ipc_status_show(struct seq_file *m, void *data)
3189{
3190 struct drm_i915_private *dev_priv = m->private;
3191
3192 seq_printf(m, "Isochronous Priority Control: %s\n",
3193 yesno(dev_priv->ipc_enabled));
3194 return 0;
3195}
3196
3197static int i915_ipc_status_open(struct inode *inode, struct file *file)
3198{
3199 struct drm_i915_private *dev_priv = inode->i_private;
3200
3201 if (!HAS_IPC(dev_priv))
3202 return -ENODEV;
3203
3204 return single_open(file, i915_ipc_status_show, dev_priv);
3205}
3206
/*
 * Write handler for i915_ipc_status: parse a boolean from userspace and
 * enable/disable Isochronous Priority Control accordingly.  Returns the
 * number of bytes consumed, or a negative errno on parse failure.
 */
static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	intel_wakeref_t wakeref;
	bool enable;
	int ret;

	ret = kstrtobool_from_user(ubuf, len, &enable);
	if (ret < 0)
		return ret;

	/* Hold a runtime-PM wakeref while touching IPC/watermark state. */
	with_intel_runtime_pm(dev_priv, wakeref) {
		if (!dev_priv->ipc_enabled && enable)
			DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
		/* Watermarks are only correct after the next commit (see message above). */
		dev_priv->wm.distrust_bios_wm = true;
		dev_priv->ipc_enabled = enable;
		intel_enable_ipc(dev_priv);
	}

	return len;
}
3230
/* File operations for i915_ipc_status: seq_file read plus the custom write. */
static const struct file_operations i915_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = i915_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_ipc_status_write
};
3239
/*
 * Dump the Display Data Buffer (DDB) allocation — start/end/size for every
 * plane plus the cursor — for each pipe.  Only meaningful on gen9+.
 */
static int i915_ddb_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct skl_ddb_entry *entry;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 9)
		return -ENODEV;

	/* Lock out modesets so the crtc state read below stays consistent. */
	drm_modeset_lock_all(dev);

	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		enum pipe pipe = crtc->pipe;
		enum plane_id plane_id;

		seq_printf(m, "Pipe %c\n", pipe_name(pipe));

		for_each_plane_id_on_crtc(crtc, plane_id) {
			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
				   entry->start, entry->end,
				   skl_ddb_entry_size(entry));
		}

		/* Cursor has a fixed plane id and is reported separately. */
		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
			   entry->end, skl_ddb_entry_size(entry));
	}

	drm_modeset_unlock_all(dev);

	return 0;
}
3278
/*
 * Print the DRRS (Dynamic Refresh Rate Switching) state for one crtc:
 * the connector(s) driven by it, the VBT-reported DRRS type, and — when
 * DRRS is active — the current refresh-rate state under drrs->mutex.
 */
static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Name every connector currently assigned to this crtc. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);
	}
	drm_connector_list_iter_end(&conn_iter);

	/* DRRS capability as parsed from the VBT. */
	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled\n");
			if (dev_priv->psr.enabled)
				seq_puts(m,
				"\tAs PSR is enabled, DRRS is not enabled\n");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
					drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		/* Report which refresh rate is selected and its vrefresh. */
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
						drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter*/
		seq_puts(m, "\tDRRS Supported : No");
	}
	seq_puts(m, "\n");
}
3353
3354static int i915_drrs_status(struct seq_file *m, void *unused)
3355{
David Weinehall36cdd012016-08-22 13:59:31 +03003356 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3357 struct drm_device *dev = &dev_priv->drm;
Vandana Kannana54746e2015-03-03 20:53:10 +05303358 struct intel_crtc *intel_crtc;
3359 int active_crtc_cnt = 0;
3360
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003361 drm_modeset_lock_all(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303362 for_each_intel_crtc(dev, intel_crtc) {
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003363 if (intel_crtc->base.state->active) {
Vandana Kannana54746e2015-03-03 20:53:10 +05303364 active_crtc_cnt++;
3365 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
3366
3367 drrs_status_per_crtc(m, dev, intel_crtc);
3368 }
Vandana Kannana54746e2015-03-03 20:53:10 +05303369 }
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003370 drm_modeset_unlock_all(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303371
3372 if (!active_crtc_cnt)
3373 seq_puts(m, "No active crtc found\n");
3374
3375 return 0;
3376}
3377
/*
 * For every MST-capable DisplayPort source port, print the port name and
 * dump its MST topology via the DRM helper.
 */
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *intel_encoder;
	struct intel_digital_port *intel_dig_port;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* Skip DP_MST-typed encoders; we report the physical port. */
		intel_encoder = intel_attached_encoder(connector);
		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		if (!intel_dig_port->dp.can_mst)
			continue;

		seq_printf(m, "MST Source Port %c\n",
			   port_name(intel_dig_port->base.port));
		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
3408
Todd Previteeb3394fa2015-04-18 00:04:19 -07003409static ssize_t i915_displayport_test_active_write(struct file *file,
David Weinehall36cdd012016-08-22 13:59:31 +03003410 const char __user *ubuf,
3411 size_t len, loff_t *offp)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003412{
3413 char *input_buffer;
3414 int status = 0;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003415 struct drm_device *dev;
3416 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003417 struct drm_connector_list_iter conn_iter;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003418 struct intel_dp *intel_dp;
3419 int val = 0;
3420
Sudip Mukherjee9aaffa32015-07-21 17:36:45 +05303421 dev = ((struct seq_file *)file->private_data)->private;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003422
Todd Previteeb3394fa2015-04-18 00:04:19 -07003423 if (len == 0)
3424 return 0;
3425
Geliang Tang261aeba2017-05-06 23:40:17 +08003426 input_buffer = memdup_user_nul(ubuf, len);
3427 if (IS_ERR(input_buffer))
3428 return PTR_ERR(input_buffer);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003429
Todd Previteeb3394fa2015-04-18 00:04:19 -07003430 DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
3431
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003432 drm_connector_list_iter_begin(dev, &conn_iter);
3433 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003434 struct intel_encoder *encoder;
3435
Todd Previteeb3394fa2015-04-18 00:04:19 -07003436 if (connector->connector_type !=
3437 DRM_MODE_CONNECTOR_DisplayPort)
3438 continue;
3439
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003440 encoder = to_intel_encoder(connector->encoder);
3441 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3442 continue;
3443
3444 if (encoder && connector->status == connector_status_connected) {
3445 intel_dp = enc_to_intel_dp(&encoder->base);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003446 status = kstrtoint(input_buffer, 10, &val);
3447 if (status < 0)
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003448 break;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003449 DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3450 /* To prevent erroneous activation of the compliance
3451 * testing code, only accept an actual value of 1 here
3452 */
3453 if (val == 1)
Manasi Navarec1617ab2016-12-09 16:22:50 -08003454 intel_dp->compliance.test_active = 1;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003455 else
Manasi Navarec1617ab2016-12-09 16:22:50 -08003456 intel_dp->compliance.test_active = 0;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003457 }
3458 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003459 drm_connector_list_iter_end(&conn_iter);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003460 kfree(input_buffer);
3461 if (status < 0)
3462 return status;
3463
3464 *offp += len;
3465 return len;
3466}
3467
/*
 * Report ("1"/"0") whether DP compliance test mode is armed on each
 * connected DisplayPort connector; disconnected ones report "0".
 */
static int i915_displayport_test_active_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* Skip MST stream encoders; only real DP ports are tested. */
		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			if (intel_dp->compliance.test_active)
				seq_puts(m, "1");
			else
				seq_puts(m, "0");
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
3501
3502static int i915_displayport_test_active_open(struct inode *inode,
David Weinehall36cdd012016-08-22 13:59:31 +03003503 struct file *file)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003504{
David Weinehall36cdd012016-08-22 13:59:31 +03003505 return single_open(file, i915_displayport_test_active_show,
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003506 inode->i_private);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003507}
3508
/* File operations for i915_displayport_test_active: seq read + custom write. */
static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};
3517
/*
 * Dump the per-connector DP compliance test payload: the EDID checksum for
 * EDID-read tests, or hdisplay/vdisplay/bpc for video-pattern tests.
 */
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			if (intel_dp->compliance.test_type ==
			    DP_TEST_LINK_EDID_READ)
				seq_printf(m, "%lx",
					   intel_dp->compliance.test_data.edid);
			else if (intel_dp->compliance.test_type ==
				 DP_TEST_LINK_VIDEO_PATTERN) {
				seq_printf(m, "hdisplay: %d\n",
					   intel_dp->compliance.test_data.hdisplay);
				seq_printf(m, "vdisplay: %d\n",
					   intel_dp->compliance.test_data.vdisplay);
				seq_printf(m, "bpc: %u\n",
					   intel_dp->compliance.test_data.bpc);
			}
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
/* Generates i915_displayport_test_data_fops around the show above. */
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003561
/*
 * Report the requested DP compliance test type (hex) per connected
 * DisplayPort connector; "0" for disconnected ones.
 */
static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
/* Generates i915_displayport_test_type_fops around the show above. */
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003593
/*
 * Print the watermark latency table.  Platform selects how many levels are
 * valid; raw values are stored in 0.5us units except on gen9+/vlv/chv/g4x
 * where they are in us (hence the *10/*5 scaling to print tenths of us).
 */
static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	int level;
	int num_levels;

	/* Number of valid latency levels varies by platform. */
	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9/vlv/chv
		 */
		if (INTEL_GEN(dev_priv) >= 9 ||
		    IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv) ||
		    IS_G4X(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}
3633
3634static int pri_wm_latency_show(struct seq_file *m, void *data)
3635{
David Weinehall36cdd012016-08-22 13:59:31 +03003636 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003637 const u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003638
David Weinehall36cdd012016-08-22 13:59:31 +03003639 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003640 latencies = dev_priv->wm.skl_latency;
3641 else
David Weinehall36cdd012016-08-22 13:59:31 +03003642 latencies = dev_priv->wm.pri_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003643
3644 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003645
3646 return 0;
3647}
3648
3649static int spr_wm_latency_show(struct seq_file *m, void *data)
3650{
David Weinehall36cdd012016-08-22 13:59:31 +03003651 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003652 const u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003653
David Weinehall36cdd012016-08-22 13:59:31 +03003654 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003655 latencies = dev_priv->wm.skl_latency;
3656 else
David Weinehall36cdd012016-08-22 13:59:31 +03003657 latencies = dev_priv->wm.spr_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003658
3659 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003660
3661 return 0;
3662}
3663
3664static int cur_wm_latency_show(struct seq_file *m, void *data)
3665{
David Weinehall36cdd012016-08-22 13:59:31 +03003666 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003667 const u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003668
David Weinehall36cdd012016-08-22 13:59:31 +03003669 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003670 latencies = dev_priv->wm.skl_latency;
3671 else
David Weinehall36cdd012016-08-22 13:59:31 +03003672 latencies = dev_priv->wm.cur_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003673
3674 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003675
3676 return 0;
3677}
3678
3679static int pri_wm_latency_open(struct inode *inode, struct file *file)
3680{
David Weinehall36cdd012016-08-22 13:59:31 +03003681 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003682
Ville Syrjälä04548cb2017-04-21 21:14:29 +03003683 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003684 return -ENODEV;
3685
David Weinehall36cdd012016-08-22 13:59:31 +03003686 return single_open(file, pri_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003687}
3688
3689static int spr_wm_latency_open(struct inode *inode, struct file *file)
3690{
David Weinehall36cdd012016-08-22 13:59:31 +03003691 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003692
Rodrigo Vivib2ae3182019-02-04 14:25:38 -08003693 if (HAS_GMCH(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003694 return -ENODEV;
3695
David Weinehall36cdd012016-08-22 13:59:31 +03003696 return single_open(file, spr_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003697}
3698
3699static int cur_wm_latency_open(struct inode *inode, struct file *file)
3700{
David Weinehall36cdd012016-08-22 13:59:31 +03003701 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003702
Rodrigo Vivib2ae3182019-02-04 14:25:38 -08003703 if (HAS_GMCH(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003704 return -ENODEV;
3705
David Weinehall36cdd012016-08-22 13:59:31 +03003706 return single_open(file, cur_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003707}
3708
/*
 * Common write helper for the wm latency debugfs files: parse up to eight
 * space-separated u16 values and store them into the given latency table.
 * The write must supply exactly num_levels values for the platform, or
 * -EINVAL is returned.
 */
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, u16 wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	u16 new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	/* Must mirror the num_levels selection in wm_latency_show(). */
	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -EINVAL;

	/* Update the table under the modeset locks. */
	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
3753
3754
3755static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3756 size_t len, loff_t *offp)
3757{
3758 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003759 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003760 u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003761
David Weinehall36cdd012016-08-22 13:59:31 +03003762 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003763 latencies = dev_priv->wm.skl_latency;
3764 else
David Weinehall36cdd012016-08-22 13:59:31 +03003765 latencies = dev_priv->wm.pri_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003766
3767 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003768}
3769
3770static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3771 size_t len, loff_t *offp)
3772{
3773 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003774 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003775 u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003776
David Weinehall36cdd012016-08-22 13:59:31 +03003777 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003778 latencies = dev_priv->wm.skl_latency;
3779 else
David Weinehall36cdd012016-08-22 13:59:31 +03003780 latencies = dev_priv->wm.spr_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003781
3782 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003783}
3784
3785static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3786 size_t len, loff_t *offp)
3787{
3788 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003789 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003790 u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003791
David Weinehall36cdd012016-08-22 13:59:31 +03003792 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003793 latencies = dev_priv->wm.skl_latency;
3794 else
David Weinehall36cdd012016-08-22 13:59:31 +03003795 latencies = dev_priv->wm.cur_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003796
3797 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003798}
3799
/* File operations for the primary-plane wm latency file (read + write). */
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};
3808
/* File operations for the sprite-plane wm latency file (read + write). */
static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};
3817
/* File operations for the cursor-plane wm latency file (read + write). */
static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
3826
Kees Cook647416f2013-03-10 14:10:06 -07003827static int
3828i915_wedged_get(void *data, u64 *val)
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003829{
Chris Wilsonc41166f2019-02-20 14:56:37 +00003830 int ret = i915_terminally_wedged(data);
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003831
Chris Wilsonc41166f2019-02-20 14:56:37 +00003832 switch (ret) {
3833 case -EIO:
3834 *val = 1;
3835 return 0;
3836 case 0:
3837 *val = 0;
3838 return 0;
3839 default:
3840 return ret;
3841 }
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003842}
3843
Kees Cook647416f2013-03-10 14:10:06 -07003844static int
3845i915_wedged_set(void *data, u64 val)
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003846{
Chris Wilson598b6b52017-03-25 13:47:35 +00003847 struct drm_i915_private *i915 = data;
Imre Deakd46c0512014-04-14 20:24:27 +03003848
Chris Wilson15cbf002019-02-08 15:37:06 +00003849 /* Flush any previous reset before applying for a new one */
3850 wait_event(i915->gpu_error.reset_queue,
3851 !test_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags));
Mika Kuoppalab8d24a02015-01-28 17:03:14 +02003852
Chris Wilsonce800752018-03-20 10:04:49 +00003853 i915_handle_error(i915, val, I915_ERROR_CAPTURE,
3854 "Manually set wedged engine mask = %llx", val);
Kees Cook647416f2013-03-10 14:10:06 -07003855 return 0;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003856}
3857
/* i915_wedged: read reports wedged state, write forces an engine reset. */
DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003861
/*
 * Bit flags accepted by a write to i915_gem_drop_caches; each bit selects
 * a category of kernel-held GPU state to release (see
 * i915_drop_caches_set() below for what each flag does).
 */
#define DROP_UNBOUND	BIT(0)
#define DROP_BOUND	BIT(1)
#define DROP_RETIRE	BIT(2)
#define DROP_ACTIVE	BIT(3)
#define DROP_FREED	BIT(4)
#define DROP_SHRINK_ALL	BIT(5)
#define DROP_IDLE	BIT(6)
#define DROP_RESET_ACTIVE	BIT(7)
#define DROP_RESET_SEQNO	BIT(8)
/* Union of all supported flags; reported by i915_drop_caches_get(). */
#define DROP_ALL (DROP_UNBOUND | \
		  DROP_BOUND | \
		  DROP_RETIRE | \
		  DROP_ACTIVE | \
		  DROP_FREED | \
		  DROP_SHRINK_ALL | \
		  DROP_IDLE | \
		  DROP_RESET_ACTIVE | \
		  DROP_RESET_SEQNO)
/* Reading i915_gem_drop_caches reports the full mask of supported flags. */
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}
3887
Kees Cook647416f2013-03-10 14:10:06 -07003888static int
3889i915_drop_caches_set(void *data, u64 val)
Chris Wilsondd624af2013-01-15 12:39:35 +00003890{
Chris Wilson6b048702018-09-03 09:33:37 +01003891 struct drm_i915_private *i915 = data;
Chris Wilsondd624af2013-01-15 12:39:35 +00003892
Chris Wilsonb4a0b322017-10-18 13:16:21 +01003893 DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
3894 val, val & DROP_ALL);
Chris Wilsondd624af2013-01-15 12:39:35 +00003895
Chris Wilsonad4062d2019-01-28 01:02:18 +00003896 if (val & DROP_RESET_ACTIVE &&
3897 wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT))
Chris Wilson6b048702018-09-03 09:33:37 +01003898 i915_gem_set_wedged(i915);
3899
Chris Wilsondd624af2013-01-15 12:39:35 +00003900 /* No need to check and wait for gpu resets, only libdrm auto-restarts
3901 * on ioctls on -EAGAIN. */
Chris Wilson6b048702018-09-03 09:33:37 +01003902 if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
Chris Wilson6cffeb82019-03-18 09:51:49 +00003903 int ret;
3904
Chris Wilson6b048702018-09-03 09:33:37 +01003905 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
Chris Wilsondd624af2013-01-15 12:39:35 +00003906 if (ret)
Chris Wilson6cffeb82019-03-18 09:51:49 +00003907 return ret;
Chris Wilsondd624af2013-01-15 12:39:35 +00003908
Chris Wilson00c26cf2017-05-24 17:26:53 +01003909 if (val & DROP_ACTIVE)
Chris Wilson6b048702018-09-03 09:33:37 +01003910 ret = i915_gem_wait_for_idle(i915,
Chris Wilson00c26cf2017-05-24 17:26:53 +01003911 I915_WAIT_INTERRUPTIBLE |
Chris Wilsonec625fb2018-07-09 13:20:42 +01003912 I915_WAIT_LOCKED,
3913 MAX_SCHEDULE_TIMEOUT);
Chris Wilson00c26cf2017-05-24 17:26:53 +01003914
Chris Wilson6b048702018-09-03 09:33:37 +01003915 if (val & DROP_RETIRE)
3916 i915_retire_requests(i915);
3917
3918 mutex_unlock(&i915->drm.struct_mutex);
3919 }
3920
Chris Wilsonc41166f2019-02-20 14:56:37 +00003921 if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(i915))
Chris Wilson6b048702018-09-03 09:33:37 +01003922 i915_handle_error(i915, ALL_ENGINES, 0, NULL);
Chris Wilsondd624af2013-01-15 12:39:35 +00003923
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01003924 fs_reclaim_acquire(GFP_KERNEL);
Chris Wilson21ab4e72014-09-09 11:16:08 +01003925 if (val & DROP_BOUND)
Chris Wilson6b048702018-09-03 09:33:37 +01003926 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
Chris Wilson4ad72b72014-09-03 19:23:37 +01003927
Chris Wilson21ab4e72014-09-09 11:16:08 +01003928 if (val & DROP_UNBOUND)
Chris Wilson6b048702018-09-03 09:33:37 +01003929 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
Chris Wilsondd624af2013-01-15 12:39:35 +00003930
Chris Wilson8eadc192017-03-08 14:46:22 +00003931 if (val & DROP_SHRINK_ALL)
Chris Wilson6b048702018-09-03 09:33:37 +01003932 i915_gem_shrink_all(i915);
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +01003933 fs_reclaim_release(GFP_KERNEL);
Chris Wilson8eadc192017-03-08 14:46:22 +00003934
Chris Wilson4dfacb02018-05-31 09:22:43 +01003935 if (val & DROP_IDLE) {
3936 do {
Chris Wilson79ffac852019-04-24 21:07:17 +01003937 flush_delayed_work(&i915->gem.retire_work);
Chris Wilson23c3c3d2019-04-24 21:07:14 +01003938 drain_delayed_work(&i915->gem.idle_work);
Chris Wilson6b048702018-09-03 09:33:37 +01003939 } while (READ_ONCE(i915->gt.awake));
Chris Wilson4dfacb02018-05-31 09:22:43 +01003940 }
Chris Wilsonb4a0b322017-10-18 13:16:21 +01003941
Chris Wilsonc9c704712018-02-19 22:06:31 +00003942 if (val & DROP_FREED)
Chris Wilson6b048702018-09-03 09:33:37 +01003943 i915_gem_drain_freed_objects(i915);
Chris Wilsonfbbd37b2016-10-28 13:58:42 +01003944
Chris Wilson6cffeb82019-03-18 09:51:49 +00003945 return 0;
Chris Wilsondd624af2013-01-15 12:39:35 +00003946}
3947
/* i915_gem_drop_caches: read shows DROP_ALL, write drops selected caches. */
DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
Chris Wilsondd624af2013-01-15 12:39:35 +00003951
/*
 * Read the current uncore/cache snoop sharing policy from the
 * GEN6_MBCUNIT_SNPCR register (gen6/gen7 only).
 */
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;
	u32 snpcr = 0;

	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
		return -ENODEV;

	/* Register read requires the device awake; take a runtime-pm ref. */
	with_intel_runtime_pm(dev_priv, wakeref)
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}
3969
/*
 * Write a new uncore/cache snoop sharing policy (0-3) into the SNPCR
 * field of GEN6_MBCUNIT_SNPCR (gen6/gen7 only).
 */
static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;

	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
		return -ENODEV;

	/* Only a 2-bit policy field is defined. */
	if (val > 3)
		return -EINVAL;

	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
	with_intel_runtime_pm(dev_priv, wakeref) {
		u32 snpcr;

		/* Update the cache sharing policy here as well */
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
		snpcr &= ~GEN6_MBC_SNPCR_MASK;
		snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
		I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
	}

	return 0;
}
3995
/* i915_cache_sharing: get/set the MBC snoop control policy (gen6/7). */
DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07003999
/*
 * Decode Cherryview power-gating signal registers into @sseu: which
 * subslices are powered up and how many EUs each one has enabled.
 */
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
#define SS_MAX 2
	const int ss_max = SS_MAX;
	u32 sig1[SS_MAX], sig2[SS_MAX];
	int ss;

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] |= BIT(ss);
		/* Each clear *_PG_ENABLE bit accounts for a pair of EUs. */
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
#undef SS_MAX
}
4032
/*
 * Decode the gen10 slice/subslice/EU power-gate ACK registers into @sseu:
 * active slice mask, per-slice subslice masks, EU totals and the per-
 * subslice EU maximum.
 */
static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		/*
		 * FIXME: Valid SS Mask respects the spec and read
		 * only valid bits for those registers, excluding reserved
		 * although this seems wrong because it would leave many
		 * subslices without ACK.
		 */
		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
	}

	/* ACK bits for the two EU halves (A/B) within each subslice pair. */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		sseu->subslice_mask[s] = info->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			/* Each set ACK bit represents an enabled EU pair. */
			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4088
/*
 * Decode the gen9 slice/subslice/EU power-gate ACK registers into @sseu.
 * On gen9 LP (Broxton-class) the subslice mask is derived from the ACK
 * bits; on gen9 BC it is taken from the static runtime info.
 */
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2 * s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	/* ACK bits for the two EU halves (A/B) within each subslice pair. */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		if (IS_GEN9_BC(dev_priv))
			sseu->subslice_mask[s] =
				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask[s] |= BIT(ss);
			}

			/* Each set ACK bit represents an enabled EU pair. */
			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4144
/*
 * Fill @sseu for Broadwell: the enabled slice mask is read from
 * GEN8_GT_SLICE_INFO; subslice and EU figures come from the static
 * runtime info, with fused-off 7-EU subslices subtracted from the total.
 */
static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
					 struct sseu_dev_info *sseu)
{
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->eu_per_subslice =
			RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			sseu->subslice_mask[s] =
				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
		}
		sseu->eu_total = sseu->eu_per_subslice *
				 sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu =
				RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}
4172
/*
 * Print one sseu_dev_info block to the seq_file, labelled either
 * "Available" (static capabilities) or "Enabled" (current hw status).
 * The capability flags are only printed for the "Available" variant.
 */
static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
				 const struct sseu_dev_info *sseu)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const char *type = is_available_info ? "Available" : "Enabled";
	int s;

	seq_printf(m, "  %s Slice Mask: %04x\n", type,
		   sseu->slice_mask);
	seq_printf(m, "  %s Slice Total: %u\n", type,
		   hweight8(sseu->slice_mask));
	seq_printf(m, "  %s Subslice Total: %u\n", type,
		   sseu_subslice_total(sseu));
	for (s = 0; s < fls(sseu->slice_mask); s++) {
		seq_printf(m, "  %s Slice%i subslices: %u\n", type,
			   s, hweight8(sseu->subslice_mask[s]));
	}
	seq_printf(m, "  %s EU Total: %u\n", type,
		   sseu->eu_total);
	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
		   sseu->eu_per_subslice);

	if (!is_available_info)
		return;

	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
	if (HAS_POOLED_EU(dev_priv))
		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);

	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(sseu->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(sseu->has_eu_pg));
}
4209
/*
 * i915_sseu_status debugfs: print the static SSEU capabilities followed
 * by the live hardware status, decoded by the per-platform helper
 * (gen8+ only).
 */
static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct sseu_dev_info sseu;
	intel_wakeref_t wakeref;

	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));
	/* Seed the limits the status decoders iterate over. */
	sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
	sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
	sseu.max_eus_per_subslice =
		RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;

	/* Register decode needs the device awake. */
	with_intel_runtime_pm(dev_priv, wakeref) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_sseu_device_status(dev_priv, &sseu);
		else if (IS_BROADWELL(dev_priv))
			broadwell_sseu_device_status(dev_priv, &sseu);
		else if (IS_GEN(dev_priv, 9))
			gen9_sseu_device_status(dev_priv, &sseu);
		else if (INTEL_GEN(dev_priv) >= 10)
			gen10_sseu_device_status(dev_priv, &sseu);
	}

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}
4244
Ben Widawsky6d794d42011-04-25 11:25:56 -07004245static int i915_forcewake_open(struct inode *inode, struct file *file)
4246{
Chris Wilsond7a133d2017-09-07 14:44:41 +01004247 struct drm_i915_private *i915 = inode->i_private;
Ben Widawsky6d794d42011-04-25 11:25:56 -07004248
Chris Wilsond7a133d2017-09-07 14:44:41 +01004249 if (INTEL_GEN(i915) < 6)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004250 return 0;
4251
Tvrtko Ursulin6ddbb12e2019-01-17 14:48:31 +00004252 file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915);
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07004253 intel_uncore_forcewake_user_get(&i915->uncore);
Ben Widawsky6d794d42011-04-25 11:25:56 -07004254
4255 return 0;
4256}
4257
Ben Widawskyc43b5632012-04-16 14:07:40 -07004258static int i915_forcewake_release(struct inode *inode, struct file *file)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004259{
Chris Wilsond7a133d2017-09-07 14:44:41 +01004260 struct drm_i915_private *i915 = inode->i_private;
Ben Widawsky6d794d42011-04-25 11:25:56 -07004261
Chris Wilsond7a133d2017-09-07 14:44:41 +01004262 if (INTEL_GEN(i915) < 6)
Ben Widawsky6d794d42011-04-25 11:25:56 -07004263 return 0;
4264
Daniele Ceraolo Spurio3ceea6a2019-03-19 11:35:36 -07004265 intel_uncore_forcewake_user_put(&i915->uncore);
Tvrtko Ursulin6ddbb12e2019-01-17 14:48:31 +00004266 intel_runtime_pm_put(i915,
4267 (intel_wakeref_t)(uintptr_t)file->private_data);
Ben Widawsky6d794d42011-04-25 11:25:56 -07004268
4269 return 0;
4270}
4271
4272static const struct file_operations i915_forcewake_fops = {
4273 .owner = THIS_MODULE,
4274 .open = i915_forcewake_open,
4275 .release = i915_forcewake_release,
4276};
4277
Lyude317eaa92017-02-03 21:18:25 -05004278static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4279{
4280 struct drm_i915_private *dev_priv = m->private;
4281 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4282
Lyude Paul6fc5d782018-11-20 19:37:17 -05004283 /* Synchronize with everything first in case there's been an HPD
4284 * storm, but we haven't finished handling it in the kernel yet
4285 */
4286 synchronize_irq(dev_priv->drm.irq);
4287 flush_work(&dev_priv->hotplug.dig_port_work);
4288 flush_work(&dev_priv->hotplug.hotplug_work);
4289
Lyude317eaa92017-02-03 21:18:25 -05004290 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4291 seq_printf(m, "Detected: %s\n",
4292 yesno(delayed_work_pending(&hotplug->reenable_work)));
4293
4294 return 0;
4295}
4296
4297static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4298 const char __user *ubuf, size_t len,
4299 loff_t *offp)
4300{
4301 struct seq_file *m = file->private_data;
4302 struct drm_i915_private *dev_priv = m->private;
4303 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4304 unsigned int new_threshold;
4305 int i;
4306 char *newline;
4307 char tmp[16];
4308
4309 if (len >= sizeof(tmp))
4310 return -EINVAL;
4311
4312 if (copy_from_user(tmp, ubuf, len))
4313 return -EFAULT;
4314
4315 tmp[len] = '\0';
4316
4317 /* Strip newline, if any */
4318 newline = strchr(tmp, '\n');
4319 if (newline)
4320 *newline = '\0';
4321
4322 if (strcmp(tmp, "reset") == 0)
4323 new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4324 else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4325 return -EINVAL;
4326
4327 if (new_threshold > 0)
4328 DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4329 new_threshold);
4330 else
4331 DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4332
4333 spin_lock_irq(&dev_priv->irq_lock);
4334 hotplug->hpd_storm_threshold = new_threshold;
4335 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4336 for_each_hpd_pin(i)
4337 hotplug->stats[i].count = 0;
4338 spin_unlock_irq(&dev_priv->irq_lock);
4339
4340 /* Re-enable hpd immediately if we were in an irq storm */
4341 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4342
4343 return len;
4344}
4345
/* seq_file open hook routing reads through i915_hpd_storm_ctl_show(). */
static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}
4350
4351static const struct file_operations i915_hpd_storm_ctl_fops = {
4352 .owner = THIS_MODULE,
4353 .open = i915_hpd_storm_ctl_open,
4354 .read = seq_read,
4355 .llseek = seq_lseek,
4356 .release = single_release,
4357 .write = i915_hpd_storm_ctl_write
4358};
4359
Lyude Paul9a64c652018-11-06 16:30:16 -05004360static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
4361{
4362 struct drm_i915_private *dev_priv = m->private;
4363
4364 seq_printf(m, "Enabled: %s\n",
4365 yesno(dev_priv->hotplug.hpd_short_storm_enabled));
4366
4367 return 0;
4368}
4369
/* seq_file open hook routing reads through i915_hpd_short_storm_ctl_show(). */
static int
i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_short_storm_ctl_show,
			   inode->i_private);
}
4376
4377static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
4378 const char __user *ubuf,
4379 size_t len, loff_t *offp)
4380{
4381 struct seq_file *m = file->private_data;
4382 struct drm_i915_private *dev_priv = m->private;
4383 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4384 char *newline;
4385 char tmp[16];
4386 int i;
4387 bool new_state;
4388
4389 if (len >= sizeof(tmp))
4390 return -EINVAL;
4391
4392 if (copy_from_user(tmp, ubuf, len))
4393 return -EFAULT;
4394
4395 tmp[len] = '\0';
4396
4397 /* Strip newline, if any */
4398 newline = strchr(tmp, '\n');
4399 if (newline)
4400 *newline = '\0';
4401
4402 /* Reset to the "default" state for this system */
4403 if (strcmp(tmp, "reset") == 0)
4404 new_state = !HAS_DP_MST(dev_priv);
4405 else if (kstrtobool(tmp, &new_state) != 0)
4406 return -EINVAL;
4407
4408 DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
4409 new_state ? "En" : "Dis");
4410
4411 spin_lock_irq(&dev_priv->irq_lock);
4412 hotplug->hpd_short_storm_enabled = new_state;
4413 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4414 for_each_hpd_pin(i)
4415 hotplug->stats[i].count = 0;
4416 spin_unlock_irq(&dev_priv->irq_lock);
4417
4418 /* Re-enable hpd immediately if we were in an irq storm */
4419 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4420
4421 return len;
4422}
4423
4424static const struct file_operations i915_hpd_short_storm_ctl_fops = {
4425 .owner = THIS_MODULE,
4426 .open = i915_hpd_short_storm_ctl_open,
4427 .read = seq_read,
4428 .llseek = seq_lseek,
4429 .release = single_release,
4430 .write = i915_hpd_short_storm_ctl_write,
4431};
4432
/*
 * Manually enable (val != 0) or disable (val == 0) DRRS on every active
 * CRTC that supports it (gen7+). Walks each CRTC under its modeset lock,
 * waits for any pending commit's hw_done, and toggles DRRS on each
 * attached eDP encoder driving that CRTC.
 */
static int i915_drrs_ctl_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 7)
		return -ENODEV;

	for_each_intel_crtc(dev, crtc) {
		struct drm_connector_list_iter conn_iter;
		struct intel_crtc_state *crtc_state;
		struct drm_connector *connector;
		struct drm_crtc_commit *commit;
		int ret;

		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		/* Inactive or DRRS-incapable CRTCs are skipped (ret == 0). */
		if (!crtc_state->base.active ||
		    !crtc_state->has_drrs)
			goto out;

		/* Let any in-flight commit reach the hardware first. */
		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				goto out;
		}

		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct intel_encoder *encoder;
			struct intel_dp *intel_dp;

			/* Only connectors currently driven by this CRTC. */
			if (!(crtc_state->base.connector_mask &
			      drm_connector_mask(connector)))
				continue;

			encoder = intel_attached_encoder(connector);
			/* DRRS only applies to eDP outputs. */
			if (encoder->type != INTEL_OUTPUT_EDP)
				continue;

			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
						val ? "en" : "dis", val);

			intel_dp = enc_to_intel_dp(&encoder->base);
			if (val)
				intel_edp_drrs_enable(intel_dp,
						      crtc_state);
			else
				intel_edp_drrs_disable(intel_dp,
						       crtc_state);
		}
		drm_connector_list_iter_end(&conn_iter);

out:
		/* Always unlock this CRTC before reporting any error. */
		drm_modeset_unlock(&crtc->base.mutex);
		if (ret)
			return ret;
	}

	return 0;
}
4500
/* i915_drrs_ctl: write-only toggle for DRRS (no read hook). */
DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4502
/*
 * Writing a truthy value re-arms FIFO underrun reporting on every active
 * CRTC (and resets FBC underrun state). Each CRTC is handled under its
 * own modeset lock, after waiting for any pending commit to complete.
 */
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	/* A falsy write is accepted but does nothing. */
	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		/* Wait for any in-flight commit to fully land first. */
		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->base.active) {
			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
				      pipe_name(intel_crtc->pipe));

			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}

		/* Unlock before propagating any wait error. */
		drm_modeset_unlock(&intel_crtc->base.mutex);

		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}
4556
4557static const struct file_operations i915_fifo_underrun_reset_ops = {
4558 .owner = THIS_MODULE,
4559 .open = simple_open,
4560 .write = i915_fifo_underrun_reset_write,
4561 .llseek = default_llseek,
4562};
4563
Lespiau, Damien06c5bf82013-10-17 19:09:56 +01004564static const struct drm_info_list i915_debugfs_list[] = {
Chris Wilson311bd682011-01-13 19:06:50 +00004565 {"i915_capabilities", i915_capabilities, 0},
Chris Wilson73aa8082010-09-30 11:46:12 +01004566 {"i915_gem_objects", i915_gem_object_info, 0},
Chris Wilson08c18322011-01-10 00:00:24 +00004567 {"i915_gem_gtt", i915_gem_gtt_info, 0},
Chris Wilson6d2b88852013-08-07 18:30:54 +01004568 {"i915_gem_stolen", i915_gem_stolen_list_info },
Chris Wilsona6172a82009-02-11 14:26:38 +00004569 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
Ben Gamari20172632009-02-17 20:08:50 -05004570 {"i915_gem_interrupt", i915_interrupt_info, 0},
Brad Volkin493018d2014-12-11 12:13:08 -08004571 {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
Dave Gordon8b417c22015-08-12 15:43:44 +01004572 {"i915_guc_info", i915_guc_info, 0},
Alex Daifdf5d352015-08-12 15:43:37 +01004573 {"i915_guc_load_status", i915_guc_load_status_info, 0},
Alex Dai4c7e77f2015-08-12 15:43:40 +01004574 {"i915_guc_log_dump", i915_guc_log_dump, 0},
Daniele Ceraolo Spurioac58d2a2017-05-22 10:50:28 -07004575 {"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
Oscar Mateoa8b93702017-05-10 15:04:51 +00004576 {"i915_guc_stage_pool", i915_guc_stage_pool, 0},
Anusha Srivatsa0509ead2017-01-18 08:05:56 -08004577 {"i915_huc_load_status", i915_huc_load_status_info, 0},
Deepak Sadb4bd12014-03-31 11:30:02 +05304578 {"i915_frequency_info", i915_frequency_info, 0},
Chris Wilsonf6544492015-01-26 18:03:04 +02004579 {"i915_hangcheck_info", i915_hangcheck_info, 0},
Michel Thierry061d06a2017-06-20 10:57:49 +01004580 {"i915_reset_info", i915_reset_info, 0},
Jesse Barnesf97108d2010-01-29 11:27:07 -08004581 {"i915_drpc_info", i915_drpc_info, 0},
Jesse Barnes7648fa92010-05-20 14:28:11 -07004582 {"i915_emon_status", i915_emon_status, 0},
Jesse Barnes23b2f8b2011-06-28 13:04:16 -07004583 {"i915_ring_freq_table", i915_ring_freq_table, 0},
Daniel Vetter9a851782015-06-18 10:30:22 +02004584 {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
Jesse Barnesb5e50c32010-02-05 12:42:41 -08004585 {"i915_fbc_status", i915_fbc_status, 0},
Paulo Zanoni92d44622013-05-31 16:33:24 -03004586 {"i915_ips_status", i915_ips_status, 0},
Jesse Barnes4a9bef32010-02-05 12:47:35 -08004587 {"i915_sr_status", i915_sr_status, 0},
Chris Wilson44834a62010-08-19 16:09:23 +01004588 {"i915_opregion", i915_opregion, 0},
Jani Nikulaada8f952015-12-15 13:17:12 +02004589 {"i915_vbt", i915_vbt, 0},
Chris Wilson37811fc2010-08-25 22:45:57 +01004590 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
Ben Widawskye76d3632011-03-19 18:14:29 -07004591 {"i915_context_status", i915_context_status, 0},
Mika Kuoppalaf65367b2015-01-16 11:34:42 +02004592 {"i915_forcewake_domains", i915_forcewake_domains, 0},
Daniel Vetterea16a3c2011-12-14 13:57:16 +01004593 {"i915_swizzle_info", i915_swizzle_info, 0},
Ben Widawsky63573eb2013-07-04 11:02:07 -07004594 {"i915_llc", i915_llc, 0},
Rodrigo Vivie91fd8c2013-07-11 18:44:59 -03004595 {"i915_edp_psr_status", i915_edp_psr_status, 0},
Jesse Barnesec013e72013-08-20 10:29:23 +01004596 {"i915_energy_uJ", i915_energy_uJ, 0},
Damien Lespiau6455c872015-06-04 18:23:57 +01004597 {"i915_runtime_pm_status", i915_runtime_pm_status, 0},
Imre Deak1da51582013-11-25 17:15:35 +02004598 {"i915_power_domain_info", i915_power_domain_info, 0},
Damien Lespiaub7cec662015-10-27 14:47:01 +02004599 {"i915_dmc_info", i915_dmc_info, 0},
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08004600 {"i915_display_info", i915_display_info, 0},
Chris Wilson1b365952016-10-04 21:11:31 +01004601 {"i915_engine_info", i915_engine_info, 0},
Lionel Landwerlin79e9cd52018-03-06 12:28:54 +00004602 {"i915_rcs_topology", i915_rcs_topology, 0},
Chris Wilsonc5418a82017-10-13 21:26:19 +01004603 {"i915_shrinker_info", i915_shrinker_info, 0},
Daniel Vetter728e29d2014-06-25 22:01:53 +03004604 {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
Dave Airlie11bed952014-05-12 15:22:27 +10004605 {"i915_dp_mst_info", i915_dp_mst_info, 0},
Damien Lespiau1ed1ef92014-08-30 16:50:59 +01004606 {"i915_wa_registers", i915_wa_registers, 0},
Damien Lespiauc5511e42014-11-04 17:06:51 +00004607 {"i915_ddb_info", i915_ddb_info, 0},
Jeff McGee38732182015-02-13 10:27:54 -06004608 {"i915_sseu_status", i915_sseu_status, 0},
Vandana Kannana54746e2015-03-03 20:53:10 +05304609 {"i915_drrs_status", i915_drrs_status, 0},
Chris Wilson1854d5c2015-04-07 16:20:32 +01004610 {"i915_rps_boost_info", i915_rps_boost_info, 0},
Ben Gamari20172632009-02-17 20:08:50 -05004611};
Ben Gamari27c202a2009-07-01 22:26:52 -04004612#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
Ben Gamari20172632009-02-17 20:08:50 -05004613
Lespiau, Damien06c5bf82013-10-17 19:09:56 +01004614static const struct i915_debugfs_files {
Daniel Vetter34b96742013-07-04 20:49:44 +02004615 const char *name;
4616 const struct file_operations *fops;
4617} i915_debugfs_files[] = {
4618 {"i915_wedged", &i915_wedged_fops},
Daniel Vetter34b96742013-07-04 20:49:44 +02004619 {"i915_cache_sharing", &i915_cache_sharing_fops},
Daniel Vetter34b96742013-07-04 20:49:44 +02004620 {"i915_gem_drop_caches", &i915_drop_caches_fops},
Chris Wilson98a2f412016-10-12 10:05:18 +01004621#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
Daniel Vetter34b96742013-07-04 20:49:44 +02004622 {"i915_error_state", &i915_error_state_fops},
Chris Wilson5a4c6f12017-02-14 16:46:11 +00004623 {"i915_gpu_info", &i915_gpu_info_fops},
Chris Wilson98a2f412016-10-12 10:05:18 +01004624#endif
Maarten Lankhorstd52ad9c2018-03-28 12:05:26 +02004625 {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
Ville Syrjälä369a1342014-01-22 14:36:08 +02004626 {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
4627 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
4628 {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
Ville Syrjälä4127dc42017-06-06 15:44:12 +03004629 {"i915_fbc_false_color", &i915_fbc_false_color_fops},
Todd Previteeb3394fa2015-04-18 00:04:19 -07004630 {"i915_dp_test_data", &i915_displayport_test_data_fops},
4631 {"i915_dp_test_type", &i915_displayport_test_type_fops},
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05304632 {"i915_dp_test_active", &i915_displayport_test_active_fops},
Michał Winiarski4977a282018-03-19 10:53:40 +01004633 {"i915_guc_log_level", &i915_guc_log_level_fops},
4634 {"i915_guc_log_relay", &i915_guc_log_relay_fops},
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05304635 {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
Lyude Paul9a64c652018-11-06 16:30:16 -05004636 {"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
C, Ramalingam35954e82017-11-08 00:08:23 +05304637 {"i915_ipc_status", &i915_ipc_status_fops},
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07004638 {"i915_drrs_ctl", &i915_drrs_ctl_fops},
4639 {"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
Daniel Vetter34b96742013-07-04 20:49:44 +02004640};
4641
Chris Wilson1dac8912016-06-24 14:00:17 +01004642int i915_debugfs_register(struct drm_i915_private *dev_priv)
Ben Gamari20172632009-02-17 20:08:50 -05004643{
Chris Wilson91c8a322016-07-05 10:40:23 +01004644 struct drm_minor *minor = dev_priv->drm.primary;
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004645 struct dentry *ent;
Maarten Lankhorst6cc42152018-06-28 09:23:02 +02004646 int i;
Chris Wilsonf3cd4742009-10-13 22:20:20 +01004647
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004648 ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4649 minor->debugfs_root, to_i915(minor->dev),
4650 &i915_forcewake_fops);
4651 if (!ent)
4652 return -ENOMEM;
Daniel Vetter6a9c3082011-12-14 13:57:11 +01004653
Daniel Vetter34b96742013-07-04 20:49:44 +02004654 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004655 ent = debugfs_create_file(i915_debugfs_files[i].name,
4656 S_IRUGO | S_IWUSR,
4657 minor->debugfs_root,
4658 to_i915(minor->dev),
Daniel Vetter34b96742013-07-04 20:49:44 +02004659 i915_debugfs_files[i].fops);
Noralf Trønnesb05eeb02017-01-26 23:56:21 +01004660 if (!ent)
4661 return -ENOMEM;
Daniel Vetter34b96742013-07-04 20:49:44 +02004662 }
Mika Kuoppala40633212012-12-04 15:12:00 +02004663
Ben Gamari27c202a2009-07-01 22:26:52 -04004664 return drm_debugfs_create_files(i915_debugfs_list,
4665 I915_DEBUGFS_ENTRIES,
Ben Gamari20172632009-02-17 20:08:50 -05004666 minor->debugfs_root, minor);
4667}
4668
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004669struct dpcd_block {
4670 /* DPCD dump start address. */
4671 unsigned int offset;
4672 /* DPCD dump end address, inclusive. If unset, .size will be used. */
4673 unsigned int end;
4674 /* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
4675 size_t size;
4676 /* Only valid for eDP. */
4677 bool edp;
4678};
4679
/* DPCD register ranges dumped by the per-connector i915_dpcd debugfs file. */
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
4692
4693static int i915_dpcd_show(struct seq_file *m, void *data)
4694{
4695 struct drm_connector *connector = m->private;
4696 struct intel_dp *intel_dp =
4697 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
Jani Nikulae5315212019-01-16 11:15:23 +02004698 u8 buf[16];
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004699 ssize_t err;
4700 int i;
4701
Mika Kuoppala5c1a8872015-05-15 13:09:21 +03004702 if (connector->status != connector_status_connected)
4703 return -ENODEV;
4704
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004705 for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4706 const struct dpcd_block *b = &i915_dpcd_debug[i];
4707 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4708
4709 if (b->edp &&
4710 connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4711 continue;
4712
4713 /* low tech for now */
4714 if (WARN_ON(size > sizeof(buf)))
4715 continue;
4716
4717 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
Chris Wilson65404c82018-10-10 09:17:06 +01004718 if (err < 0)
4719 seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
4720 else
4721 seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
kbuild test robotb3f9d7d2015-04-16 18:34:06 +08004722 }
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004723
4724 return 0;
4725}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02004726DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004727
David Weinehallecbd6782016-08-23 12:23:56 +03004728static int i915_panel_show(struct seq_file *m, void *data)
4729{
4730 struct drm_connector *connector = m->private;
4731 struct intel_dp *intel_dp =
4732 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4733
4734 if (connector->status != connector_status_connected)
4735 return -ENODEV;
4736
4737 seq_printf(m, "Panel power up delay: %d\n",
4738 intel_dp->panel_power_up_delay);
4739 seq_printf(m, "Panel power down delay: %d\n",
4740 intel_dp->panel_power_down_delay);
4741 seq_printf(m, "Backlight on delay: %d\n",
4742 intel_dp->backlight_on_delay);
4743 seq_printf(m, "Backlight off delay: %d\n",
4744 intel_dp->backlight_off_delay);
4745
4746 return 0;
4747}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02004748DEFINE_SHOW_ATTRIBUTE(i915_panel);
David Weinehallecbd6782016-08-23 12:23:56 +03004749
Ramalingam Cbdc93fe2018-10-23 14:52:29 +05304750static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
4751{
4752 struct drm_connector *connector = m->private;
4753 struct intel_connector *intel_connector = to_intel_connector(connector);
4754
4755 if (connector->status != connector_status_connected)
4756 return -ENODEV;
4757
4758 /* HDCP is supported by connector */
Ramalingam Cd3dacc72018-10-29 15:15:46 +05304759 if (!intel_connector->hdcp.shim)
Ramalingam Cbdc93fe2018-10-23 14:52:29 +05304760 return -EINVAL;
4761
4762 seq_printf(m, "%s:%d HDCP version: ", connector->name,
4763 connector->base.id);
4764 seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
4765 "None" : "HDCP1.4");
4766 seq_puts(m, "\n");
4767
4768 return 0;
4769}
4770DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
4771
Manasi Navaree845f092018-12-05 16:54:07 -08004772static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
4773{
4774 struct drm_connector *connector = m->private;
4775 struct drm_device *dev = connector->dev;
4776 struct drm_crtc *crtc;
4777 struct intel_dp *intel_dp;
4778 struct drm_modeset_acquire_ctx ctx;
4779 struct intel_crtc_state *crtc_state = NULL;
4780 int ret = 0;
4781 bool try_again = false;
4782
4783 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
4784
4785 do {
Manasi Navare6afe8922018-12-19 15:51:20 -08004786 try_again = false;
Manasi Navaree845f092018-12-05 16:54:07 -08004787 ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
4788 &ctx);
4789 if (ret) {
Chris Wilsonee6df562019-03-29 16:51:52 +00004790 if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
4791 try_again = true;
4792 continue;
4793 }
Manasi Navaree845f092018-12-05 16:54:07 -08004794 break;
4795 }
4796 crtc = connector->state->crtc;
4797 if (connector->status != connector_status_connected || !crtc) {
4798 ret = -ENODEV;
4799 break;
4800 }
4801 ret = drm_modeset_lock(&crtc->mutex, &ctx);
4802 if (ret == -EDEADLK) {
4803 ret = drm_modeset_backoff(&ctx);
4804 if (!ret) {
4805 try_again = true;
4806 continue;
4807 }
4808 break;
4809 } else if (ret) {
4810 break;
4811 }
4812 intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4813 crtc_state = to_intel_crtc_state(crtc->state);
4814 seq_printf(m, "DSC_Enabled: %s\n",
4815 yesno(crtc_state->dsc_params.compression_enable));
Radhakrishna Sripadafed85692019-01-09 13:14:14 -08004816 seq_printf(m, "DSC_Sink_Support: %s\n",
4817 yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
Manasi Navarefeb88462019-04-05 15:48:21 -07004818 seq_printf(m, "Force_DSC_Enable: %s\n",
4819 yesno(intel_dp->force_dsc_en));
Manasi Navaree845f092018-12-05 16:54:07 -08004820 if (!intel_dp_is_edp(intel_dp))
4821 seq_printf(m, "FEC_Sink_Support: %s\n",
4822 yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
4823 } while (try_again);
4824
4825 drm_modeset_drop_locks(&ctx);
4826 drm_modeset_acquire_fini(&ctx);
4827
4828 return ret;
4829}
4830
4831static ssize_t i915_dsc_fec_support_write(struct file *file,
4832 const char __user *ubuf,
4833 size_t len, loff_t *offp)
4834{
4835 bool dsc_enable = false;
4836 int ret;
4837 struct drm_connector *connector =
4838 ((struct seq_file *)file->private_data)->private;
4839 struct intel_encoder *encoder = intel_attached_encoder(connector);
4840 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4841
4842 if (len == 0)
4843 return 0;
4844
4845 DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
4846 len);
4847
4848 ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
4849 if (ret < 0)
4850 return ret;
4851
4852 DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
4853 (dsc_enable) ? "true" : "false");
4854 intel_dp->force_dsc_en = dsc_enable;
4855
4856 *offp += len;
4857 return len;
4858}
4859
/* seq_file open hook; inode->i_private carries the drm_connector. */
static int i915_dsc_fec_support_open(struct inode *inode,
				     struct file *file)
{
	return single_open(file, i915_dsc_fec_support_show,
			   inode->i_private);
}
4866
/* Read shows DSC/FEC state; write forces DSC enable (see handlers above). */
static const struct file_operations i915_dsc_fec_support_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_fec_support_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_fec_support_write
};
4875
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004876/**
4877 * i915_debugfs_connector_add - add i915 specific connector debugfs files
4878 * @connector: pointer to a registered drm_connector
4879 *
4880 * Cleanup will be done by drm_connector_unregister() through a call to
4881 * drm_debugfs_connector_remove().
4882 *
4883 * Returns 0 on success, negative error codes on error.
4884 */
4885int i915_debugfs_connector_add(struct drm_connector *connector)
4886{
4887 struct dentry *root = connector->debugfs_entry;
Manasi Navaree845f092018-12-05 16:54:07 -08004888 struct drm_i915_private *dev_priv = to_i915(connector->dev);
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004889
4890 /* The connector must have been registered beforehands. */
4891 if (!root)
4892 return -ENODEV;
4893
4894 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4895 connector->connector_type == DRM_MODE_CONNECTOR_eDP)
David Weinehallecbd6782016-08-23 12:23:56 +03004896 debugfs_create_file("i915_dpcd", S_IRUGO, root,
4897 connector, &i915_dpcd_fops);
4898
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07004899 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
David Weinehallecbd6782016-08-23 12:23:56 +03004900 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4901 connector, &i915_panel_fops);
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07004902 debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
4903 connector, &i915_psr_sink_status_fops);
4904 }
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004905
Ramalingam Cbdc93fe2018-10-23 14:52:29 +05304906 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4907 connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
4908 connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
4909 debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
4910 connector, &i915_hdcp_sink_capability_fops);
4911 }
4912
Manasi Navaree845f092018-12-05 16:54:07 -08004913 if (INTEL_GEN(dev_priv) >= 10 &&
4914 (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4915 connector->connector_type == DRM_MODE_CONNECTOR_eDP))
4916 debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
4917 connector, &i915_dsc_fec_support_fops);
4918
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004919 return 0;
4920}