blob: 2c640987c24dfe65bb5f09614daf4789c9d97660 [file] [log] [blame]
Ben Gamari20172632009-02-17 20:08:50 -05001/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 *
27 */
28
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +010029#include <linux/sched/mm.h>
Jani Nikula98afa312019-04-05 14:00:08 +030030#include <linux/sort.h>
31
Daniel Vetterfcd70cd2019-01-17 22:03:34 +010032#include <drm/drm_debugfs.h>
33#include <drm/drm_fourcc.h>
Ben Gamari20172632009-02-17 20:08:50 -050034
Jani Nikula1d455f82019-08-06 14:39:33 +030035#include "display/intel_display_types.h"
Jani Nikula379bc102019-06-13 11:44:15 +030036#include "display/intel_dp.h"
Jani Nikuladf0566a2019-06-13 11:44:16 +030037#include "display/intel_fbc.h"
38#include "display/intel_hdcp.h"
Jani Nikula379bc102019-06-13 11:44:15 +030039#include "display/intel_hdmi.h"
Jani Nikuladf0566a2019-06-13 11:44:16 +030040#include "display/intel_psr.h"
Jani Nikula379bc102019-06-13 11:44:15 +030041
Chris Wilson10be98a2019-05-28 10:29:49 +010042#include "gem/i915_gem_context.h"
Chris Wilsonc7302f22019-08-08 21:27:58 +010043#include "gt/intel_gt_pm.h"
Chris Wilson112ed2d2019-04-24 18:48:39 +010044#include "gt/intel_reset.h"
Daniele Ceraolo Spurio0f261b22019-07-13 11:00:11 +010045#include "gt/uc/intel_guc_submission.h"
Chris Wilson112ed2d2019-04-24 18:48:39 +010046
Jani Nikula2126d3e2019-05-02 18:02:43 +030047#include "i915_debugfs.h"
Jani Nikula440e2b32019-04-29 15:29:27 +030048#include "i915_irq.h"
Jani Nikulaa09d9a82019-08-06 13:07:28 +030049#include "i915_trace.h"
Jani Nikula61764902019-05-02 18:02:39 +030050#include "intel_csr.h"
Jani Nikula696173b2019-04-05 14:00:15 +030051#include "intel_pm.h"
Chris Wilson56c50982019-04-26 09:17:22 +010052#include "intel_sideband.h"
Chris Wilson9f588922019-01-16 15:33:04 +000053
David Weinehall36cdd012016-08-22 13:59:31 +030054static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
55{
56 return to_i915(node->minor->dev);
57}
58
Chris Wilson70d39fe2010-08-25 16:03:34 +010059static int i915_capabilities(struct seq_file *m, void *data)
60{
David Weinehall36cdd012016-08-22 13:59:31 +030061 struct drm_i915_private *dev_priv = node_to_i915(m->private);
62 const struct intel_device_info *info = INTEL_INFO(dev_priv);
Michal Wajdeczkoa8c9b842017-12-19 11:43:44 +000063 struct drm_printer p = drm_seq_file_printer(m);
Chris Wilson70d39fe2010-08-25 16:03:34 +010064
David Weinehall36cdd012016-08-22 13:59:31 +030065 seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
Jani Nikula2e0d26f2016-12-01 14:49:55 +020066 seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
David Weinehall36cdd012016-08-22 13:59:31 +030067 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
Chris Wilson418e3cd2017-02-06 21:36:08 +000068
Michal Wajdeczkoa8c9b842017-12-19 11:43:44 +000069 intel_device_info_dump_flags(info, &p);
Jani Nikula02584042018-12-31 16:56:41 +020070 intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
Chris Wilson3fed1802018-02-07 21:05:43 +000071 intel_driver_caps_print(&dev_priv->caps, &p);
Chris Wilson70d39fe2010-08-25 16:03:34 +010072
Chris Wilson418e3cd2017-02-06 21:36:08 +000073 kernel_param_lock(THIS_MODULE);
Michal Wajdeczkoacfb9972017-12-19 11:43:46 +000074 i915_params_dump(&i915_modparams, &p);
Chris Wilson418e3cd2017-02-06 21:36:08 +000075 kernel_param_unlock(THIS_MODULE);
76
Chris Wilson70d39fe2010-08-25 16:03:34 +010077 return 0;
78}
Ben Gamari433e12f2009-02-17 20:08:51 -050079
Imre Deaka7363de2016-05-12 16:18:52 +030080static char get_pin_flag(struct drm_i915_gem_object *obj)
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010081{
Chris Wilsonbd3d2252017-10-13 21:26:14 +010082 return obj->pin_global ? 'p' : ' ';
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010083}
84
Imre Deaka7363de2016-05-12 16:18:52 +030085static char get_tiling_flag(struct drm_i915_gem_object *obj)
Chris Wilsona6172a82009-02-11 14:26:38 +000086{
Chris Wilson3e510a82016-08-05 10:14:23 +010087 switch (i915_gem_object_get_tiling(obj)) {
Akshay Joshi0206e352011-08-16 15:34:10 -040088 default:
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010089 case I915_TILING_NONE: return ' ';
90 case I915_TILING_X: return 'X';
91 case I915_TILING_Y: return 'Y';
Akshay Joshi0206e352011-08-16 15:34:10 -040092 }
Chris Wilsona6172a82009-02-11 14:26:38 +000093}
94
Imre Deaka7363de2016-05-12 16:18:52 +030095static char get_global_flag(struct drm_i915_gem_object *obj)
Ben Widawsky1d693bc2013-07-31 17:00:00 -070096{
Chris Wilsona65adaf2017-10-09 09:43:57 +010097 return obj->userfault_count ? 'g' : ' ';
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +010098}
99
Imre Deaka7363de2016-05-12 16:18:52 +0300100static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +0100101{
Chris Wilsona4f5ea62016-10-28 13:58:35 +0100102 return obj->mm.mapping ? 'M' : ' ';
Ben Widawsky1d693bc2013-07-31 17:00:00 -0700103}
104
Matthew Auld7393b7e2017-10-06 23:18:28 +0100105static const char *
106stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
107{
108 size_t x = 0;
109
110 switch (page_sizes) {
111 case 0:
112 return "";
113 case I915_GTT_PAGE_SIZE_4K:
114 return "4K";
115 case I915_GTT_PAGE_SIZE_64K:
116 return "64K";
117 case I915_GTT_PAGE_SIZE_2M:
118 return "2M";
119 default:
120 if (!buf)
121 return "M";
122
123 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
124 x += snprintf(buf + x, len - x, "2M, ");
125 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
126 x += snprintf(buf + x, len - x, "64K, ");
127 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
128 x += snprintf(buf + x, len - x, "4K, ");
129 buf[x-2] = '\0';
130
131 return buf;
132 }
133}
134
Chris Wilson37811fc2010-08-25 22:45:57 +0100135static void
136describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
137{
Chris Wilsonb4716182015-04-27 13:41:17 +0100138 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000139 struct intel_engine_cs *engine;
Ben Widawsky1d693bc2013-07-31 17:00:00 -0700140 struct i915_vma *vma;
Chris Wilsonfaf5bf02016-08-04 16:32:37 +0100141 unsigned int frontbuffer_bits;
Ben Widawskyd7f46fc2013-12-06 14:10:55 -0800142 int pin_count = 0;
143
Chris Wilsona93615f2019-06-21 19:37:59 +0100144 seq_printf(m, "%pK: %c%c%c%c %8zdKiB %02x %02x %s%s%s",
Chris Wilson37811fc2010-08-25 22:45:57 +0100145 &obj->base,
146 get_pin_flag(obj),
147 get_tiling_flag(obj),
Ben Widawsky1d693bc2013-07-31 17:00:00 -0700148 get_global_flag(obj),
Tvrtko Ursulinbe12a862016-04-15 11:34:52 +0100149 get_pin_mapped_flag(obj),
Eric Anholta05a5862011-12-20 08:54:15 -0800150 obj->base.size / 1024,
Christian Königc0a51fd2018-02-16 13:43:38 +0100151 obj->read_domains,
152 obj->write_domain,
David Weinehall36cdd012016-08-22 13:59:31 +0300153 i915_cache_level_str(dev_priv, obj->cache_level),
Chris Wilsona4f5ea62016-10-28 13:58:35 +0100154 obj->mm.dirty ? " dirty" : "",
155 obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
Chris Wilson37811fc2010-08-25 22:45:57 +0100156 if (obj->base.name)
157 seq_printf(m, " (name: %d)", obj->base.name);
Chris Wilson0cf289b2019-06-13 08:32:54 +0100158
159 spin_lock(&obj->vma.lock);
Chris Wilson528cbd12019-01-28 10:23:54 +0000160 list_for_each_entry(vma, &obj->vma.list, obj_link) {
Chris Wilson15717de2016-08-04 07:52:26 +0100161 if (!drm_mm_node_allocated(&vma->node))
162 continue;
163
Chris Wilson0cf289b2019-06-13 08:32:54 +0100164 spin_unlock(&obj->vma.lock);
165
166 if (i915_vma_is_pinned(vma))
167 pin_count++;
168
Matthew Auld7393b7e2017-10-06 23:18:28 +0100169 seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
Chris Wilson3272db52016-08-04 16:32:32 +0100170 i915_vma_is_ggtt(vma) ? "g" : "pp",
Matthew Auld7393b7e2017-10-06 23:18:28 +0100171 vma->node.start, vma->node.size,
172 stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
Chris Wilson21976852017-01-12 11:21:08 +0000173 if (i915_vma_is_ggtt(vma)) {
174 switch (vma->ggtt_view.type) {
175 case I915_GGTT_VIEW_NORMAL:
176 seq_puts(m, ", normal");
177 break;
178
179 case I915_GGTT_VIEW_PARTIAL:
180 seq_printf(m, ", partial [%08llx+%x]",
Chris Wilson8bab11932017-01-14 00:28:25 +0000181 vma->ggtt_view.partial.offset << PAGE_SHIFT,
182 vma->ggtt_view.partial.size << PAGE_SHIFT);
Chris Wilson21976852017-01-12 11:21:08 +0000183 break;
184
185 case I915_GGTT_VIEW_ROTATED:
186 seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
Chris Wilson8bab11932017-01-14 00:28:25 +0000187 vma->ggtt_view.rotated.plane[0].width,
188 vma->ggtt_view.rotated.plane[0].height,
189 vma->ggtt_view.rotated.plane[0].stride,
190 vma->ggtt_view.rotated.plane[0].offset,
191 vma->ggtt_view.rotated.plane[1].width,
192 vma->ggtt_view.rotated.plane[1].height,
193 vma->ggtt_view.rotated.plane[1].stride,
194 vma->ggtt_view.rotated.plane[1].offset);
Chris Wilson21976852017-01-12 11:21:08 +0000195 break;
196
Ville Syrjälä1a74fc02019-05-09 15:21:52 +0300197 case I915_GGTT_VIEW_REMAPPED:
198 seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
199 vma->ggtt_view.remapped.plane[0].width,
200 vma->ggtt_view.remapped.plane[0].height,
201 vma->ggtt_view.remapped.plane[0].stride,
202 vma->ggtt_view.remapped.plane[0].offset,
203 vma->ggtt_view.remapped.plane[1].width,
204 vma->ggtt_view.remapped.plane[1].height,
205 vma->ggtt_view.remapped.plane[1].stride,
206 vma->ggtt_view.remapped.plane[1].offset);
207 break;
208
Chris Wilson21976852017-01-12 11:21:08 +0000209 default:
210 MISSING_CASE(vma->ggtt_view.type);
211 break;
212 }
213 }
Chris Wilson49ef5292016-08-18 17:17:00 +0100214 if (vma->fence)
Chris Wilson3d6792c2019-08-12 18:48:03 +0100215 seq_printf(m, " , fence: %d", vma->fence->id);
Chris Wilson596c5922016-02-26 11:03:20 +0000216 seq_puts(m, ")");
Chris Wilson0cf289b2019-06-13 08:32:54 +0100217
218 spin_lock(&obj->vma.lock);
Ben Widawsky1d693bc2013-07-31 17:00:00 -0700219 }
Chris Wilson0cf289b2019-06-13 08:32:54 +0100220 spin_unlock(&obj->vma.lock);
221
222 seq_printf(m, " (pinned x %d)", pin_count);
Chris Wilsonc1ad11f2012-11-15 11:32:21 +0000223 if (obj->stolen)
Thierry Reding440fd522015-01-23 09:05:06 +0100224 seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
Chris Wilson0cf289b2019-06-13 08:32:54 +0100225 if (obj->pin_global)
226 seq_printf(m, " (global)");
Chris Wilson27c01aa2016-08-04 07:52:30 +0100227
Chris Wilsond07f0e52016-10-28 13:58:44 +0100228 engine = i915_gem_object_last_write_engine(obj);
Chris Wilson27c01aa2016-08-04 07:52:30 +0100229 if (engine)
230 seq_printf(m, " (%s)", engine->name);
231
Chris Wilsonfaf5bf02016-08-04 16:32:37 +0100232 frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
233 if (frontbuffer_bits)
234 seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
Chris Wilson37811fc2010-08-25 22:45:57 +0100235}
236
Chris Wilson2db8e9d2013-06-04 23:49:08 +0100237struct file_stats {
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000238 struct i915_address_space *vm;
Mika Kuoppalac44ef602015-06-25 18:35:05 +0300239 unsigned long count;
240 u64 total, unbound;
Mika Kuoppalac44ef602015-06-25 18:35:05 +0300241 u64 active, inactive;
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000242 u64 closed;
Chris Wilson2db8e9d2013-06-04 23:49:08 +0100243};
244
245static int per_file_stats(int id, void *ptr, void *data)
246{
247 struct drm_i915_gem_object *obj = ptr;
248 struct file_stats *stats = data;
Chris Wilson6313c202014-03-19 13:45:45 +0000249 struct i915_vma *vma;
Chris Wilson2db8e9d2013-06-04 23:49:08 +0100250
251 stats->count++;
252 stats->total += obj->base.size;
Chris Wilsonecab9be2019-06-12 11:57:20 +0100253 if (!atomic_read(&obj->bind_count))
Chris Wilson15717de2016-08-04 07:52:26 +0100254 stats->unbound += obj->base.size;
Chris Wilsonc67a17e2014-03-19 13:45:46 +0000255
Chris Wilson5b5efdf2019-08-08 17:24:07 +0100256 spin_lock(&obj->vma.lock);
257 if (!stats->vm) {
258 for_each_ggtt_vma(vma, obj) {
259 if (!drm_mm_node_allocated(&vma->node))
Chris Wilson6313c202014-03-19 13:45:45 +0000260 continue;
Chris Wilson5b5efdf2019-08-08 17:24:07 +0100261
262 if (i915_vma_is_active(vma))
263 stats->active += vma->node.size;
264 else
265 stats->inactive += vma->node.size;
266
267 if (i915_vma_is_closed(vma))
268 stats->closed += vma->node.size;
Chris Wilson6313c202014-03-19 13:45:45 +0000269 }
Chris Wilson5b5efdf2019-08-08 17:24:07 +0100270 } else {
271 struct rb_node *p = obj->vma.tree.rb_node;
Chris Wilson894eeec2016-08-04 07:52:20 +0100272
Chris Wilson5b5efdf2019-08-08 17:24:07 +0100273 while (p) {
274 long cmp;
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000275
Chris Wilson5b5efdf2019-08-08 17:24:07 +0100276 vma = rb_entry(p, typeof(*vma), obj_node);
277 cmp = i915_vma_compare(vma, stats->vm, NULL);
278 if (cmp == 0) {
279 if (drm_mm_node_allocated(&vma->node)) {
280 if (i915_vma_is_active(vma))
281 stats->active += vma->node.size;
282 else
283 stats->inactive += vma->node.size;
284
285 if (i915_vma_is_closed(vma))
286 stats->closed += vma->node.size;
287 }
288 break;
289 }
290 if (cmp < 0)
291 p = p->rb_right;
292 else
293 p = p->rb_left;
294 }
Chris Wilson2db8e9d2013-06-04 23:49:08 +0100295 }
Chris Wilson5b5efdf2019-08-08 17:24:07 +0100296 spin_unlock(&obj->vma.lock);
Chris Wilson2db8e9d2013-06-04 23:49:08 +0100297
298 return 0;
299}
300
/*
 * Emit one summary line for a file_stats accumulator; stays silent when
 * no objects were counted.
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu unbound, %llu closed)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.unbound, \
			   stats.closed); \
} while (0)
Brad Volkin493018d2014-12-11 12:13:08 -0800312
Chris Wilson15da9562016-05-24 14:53:43 +0100313static void print_context_stats(struct seq_file *m,
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000314 struct drm_i915_private *i915)
Chris Wilson15da9562016-05-24 14:53:43 +0100315{
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000316 struct file_stats kstats = {};
317 struct i915_gem_context *ctx;
Chris Wilson15da9562016-05-24 14:53:43 +0100318
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000319 list_for_each_entry(ctx, &i915->contexts.list, link) {
Chris Wilson02684442019-04-26 17:33:35 +0100320 struct i915_gem_engines_iter it;
Chris Wilson7e3d9a52019-03-08 13:25:16 +0000321 struct intel_context *ce;
Chris Wilson15da9562016-05-24 14:53:43 +0100322
Chris Wilson02684442019-04-26 17:33:35 +0100323 for_each_gem_engine(ce,
324 i915_gem_context_lock_engines(ctx), it) {
Chris Wilson48ae3972019-08-09 19:25:17 +0100325 intel_context_lock_pinned(ce);
326 if (intel_context_is_pinned(ce)) {
327 if (ce->state)
328 per_file_stats(0,
329 ce->state->obj, &kstats);
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000330 per_file_stats(0, ce->ring->vma->obj, &kstats);
Chris Wilson48ae3972019-08-09 19:25:17 +0100331 }
332 intel_context_unlock_pinned(ce);
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000333 }
Chris Wilson02684442019-04-26 17:33:35 +0100334 i915_gem_context_unlock_engines(ctx);
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000335
336 if (!IS_ERR_OR_NULL(ctx->file_priv)) {
Chris Wilsone568ac32019-06-11 10:12:37 +0100337 struct file_stats stats = { .vm = ctx->vm, };
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000338 struct drm_file *file = ctx->file_priv->file;
339 struct task_struct *task;
340 char name[80];
341
342 spin_lock(&file->table_lock);
343 idr_for_each(&file->object_idr, per_file_stats, &stats);
344 spin_unlock(&file->table_lock);
345
346 rcu_read_lock();
347 task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
Chris Wilson3e055312019-03-21 14:07:10 +0000348 snprintf(name, sizeof(name), "%s",
349 task ? task->comm : "<unknown>");
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000350 rcu_read_unlock();
351
352 print_file_stats(m, name, stats);
353 }
Chris Wilson15da9562016-05-24 14:53:43 +0100354 }
Chris Wilson15da9562016-05-24 14:53:43 +0100355
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000356 print_file_stats(m, "[k]contexts", kstats);
Chris Wilson15da9562016-05-24 14:53:43 +0100357}
358
David Weinehall36cdd012016-08-22 13:59:31 +0300359static int i915_gem_object_info(struct seq_file *m, void *data)
Chris Wilson73aa8082010-09-30 11:46:12 +0100360{
Chris Wilsonecab9be2019-06-12 11:57:20 +0100361 struct drm_i915_private *i915 = node_to_i915(m->private);
Chris Wilson73aa8082010-09-30 11:46:12 +0100362 int ret;
363
Chris Wilson1aff1902019-08-02 22:21:36 +0100364 seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
Chris Wilsonecab9be2019-06-12 11:57:20 +0100365 i915->mm.shrink_count,
Chris Wilson1aff1902019-08-02 22:21:36 +0100366 atomic_read(&i915->mm.free_count),
Chris Wilsonecab9be2019-06-12 11:57:20 +0100367 i915->mm.shrink_memory);
Chris Wilson73aa8082010-09-30 11:46:12 +0100368
Damien Lespiau267f0c92013-06-24 22:59:48 +0100369 seq_putc(m, '\n');
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000370
Chris Wilsonecab9be2019-06-12 11:57:20 +0100371 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
Chris Wilsonf6e8aa32019-01-07 11:54:25 +0000372 if (ret)
373 return ret;
374
Chris Wilsonecab9be2019-06-12 11:57:20 +0100375 print_context_stats(m, i915);
376 mutex_unlock(&i915->drm.struct_mutex);
Chris Wilson73aa8082010-09-30 11:46:12 +0100377
378 return 0;
379}
380
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200381static void gen8_display_interrupt_info(struct seq_file *m)
382{
383 struct drm_i915_private *dev_priv = node_to_i915(m->private);
384 int pipe;
385
386 for_each_pipe(dev_priv, pipe) {
387 enum intel_display_power_domain power_domain;
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000388 intel_wakeref_t wakeref;
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200389
390 power_domain = POWER_DOMAIN_PIPE(pipe);
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000391 wakeref = intel_display_power_get_if_enabled(dev_priv,
392 power_domain);
393 if (!wakeref) {
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200394 seq_printf(m, "Pipe %c power disabled\n",
395 pipe_name(pipe));
396 continue;
397 }
398 seq_printf(m, "Pipe %c IMR:\t%08x\n",
399 pipe_name(pipe),
400 I915_READ(GEN8_DE_PIPE_IMR(pipe)));
401 seq_printf(m, "Pipe %c IIR:\t%08x\n",
402 pipe_name(pipe),
403 I915_READ(GEN8_DE_PIPE_IIR(pipe)));
404 seq_printf(m, "Pipe %c IER:\t%08x\n",
405 pipe_name(pipe),
406 I915_READ(GEN8_DE_PIPE_IER(pipe)));
407
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000408 intel_display_power_put(dev_priv, power_domain, wakeref);
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200409 }
410
411 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
412 I915_READ(GEN8_DE_PORT_IMR));
413 seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
414 I915_READ(GEN8_DE_PORT_IIR));
415 seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
416 I915_READ(GEN8_DE_PORT_IER));
417
418 seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
419 I915_READ(GEN8_DE_MISC_IMR));
420 seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
421 I915_READ(GEN8_DE_MISC_IIR));
422 seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
423 I915_READ(GEN8_DE_MISC_IER));
424
425 seq_printf(m, "PCU interrupt mask:\t%08x\n",
426 I915_READ(GEN8_PCU_IMR));
427 seq_printf(m, "PCU interrupt identity:\t%08x\n",
428 I915_READ(GEN8_PCU_IIR));
429 seq_printf(m, "PCU interrupt enable:\t%08x\n",
430 I915_READ(GEN8_PCU_IER));
431}
432
Ben Gamari20172632009-02-17 20:08:50 -0500433static int i915_interrupt_info(struct seq_file *m, void *data)
434{
David Weinehall36cdd012016-08-22 13:59:31 +0300435 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Tvrtko Ursuline2f80392016-03-16 11:00:36 +0000436 struct intel_engine_cs *engine;
Chris Wilsona0371212019-01-14 14:21:14 +0000437 intel_wakeref_t wakeref;
Chris Wilson4bb05042016-09-03 07:53:43 +0100438 int i, pipe;
Chris Wilsonde227ef2010-07-03 07:58:38 +0100439
Daniele Ceraolo Spuriod858d562019-06-13 16:21:54 -0700440 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
Ben Gamari20172632009-02-17 20:08:50 -0500441
David Weinehall36cdd012016-08-22 13:59:31 +0300442 if (IS_CHERRYVIEW(dev_priv)) {
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000443 intel_wakeref_t pref;
444
Ville Syrjälä74e1ca82014-04-09 13:28:09 +0300445 seq_printf(m, "Master Interrupt Control:\t%08x\n",
446 I915_READ(GEN8_MASTER_IRQ));
447
448 seq_printf(m, "Display IER:\t%08x\n",
449 I915_READ(VLV_IER));
450 seq_printf(m, "Display IIR:\t%08x\n",
451 I915_READ(VLV_IIR));
452 seq_printf(m, "Display IIR_RW:\t%08x\n",
453 I915_READ(VLV_IIR_RW));
454 seq_printf(m, "Display IMR:\t%08x\n",
455 I915_READ(VLV_IMR));
Chris Wilson9c870d02016-10-24 13:42:15 +0100456 for_each_pipe(dev_priv, pipe) {
457 enum intel_display_power_domain power_domain;
458
459 power_domain = POWER_DOMAIN_PIPE(pipe);
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000460 pref = intel_display_power_get_if_enabled(dev_priv,
461 power_domain);
462 if (!pref) {
Chris Wilson9c870d02016-10-24 13:42:15 +0100463 seq_printf(m, "Pipe %c power disabled\n",
464 pipe_name(pipe));
465 continue;
466 }
467
Ville Syrjälä74e1ca82014-04-09 13:28:09 +0300468 seq_printf(m, "Pipe %c stat:\t%08x\n",
469 pipe_name(pipe),
470 I915_READ(PIPESTAT(pipe)));
471
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000472 intel_display_power_put(dev_priv, power_domain, pref);
Chris Wilson9c870d02016-10-24 13:42:15 +0100473 }
474
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000475 pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
Ville Syrjälä74e1ca82014-04-09 13:28:09 +0300476 seq_printf(m, "Port hotplug:\t%08x\n",
477 I915_READ(PORT_HOTPLUG_EN));
478 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
479 I915_READ(VLV_DPFLIPSTAT));
480 seq_printf(m, "DPINVGTT:\t%08x\n",
481 I915_READ(DPINVGTT));
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000482 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
Ville Syrjälä74e1ca82014-04-09 13:28:09 +0300483
484 for (i = 0; i < 4; i++) {
485 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
486 i, I915_READ(GEN8_GT_IMR(i)));
487 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
488 i, I915_READ(GEN8_GT_IIR(i)));
489 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
490 i, I915_READ(GEN8_GT_IER(i)));
491 }
492
493 seq_printf(m, "PCU interrupt mask:\t%08x\n",
494 I915_READ(GEN8_PCU_IMR));
495 seq_printf(m, "PCU interrupt identity:\t%08x\n",
496 I915_READ(GEN8_PCU_IIR));
497 seq_printf(m, "PCU interrupt enable:\t%08x\n",
498 I915_READ(GEN8_PCU_IER));
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200499 } else if (INTEL_GEN(dev_priv) >= 11) {
500 seq_printf(m, "Master Interrupt Control: %08x\n",
501 I915_READ(GEN11_GFX_MSTR_IRQ));
502
503 seq_printf(m, "Render/Copy Intr Enable: %08x\n",
504 I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
505 seq_printf(m, "VCS/VECS Intr Enable: %08x\n",
506 I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
507 seq_printf(m, "GUC/SG Intr Enable:\t %08x\n",
508 I915_READ(GEN11_GUC_SG_INTR_ENABLE));
509 seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
510 I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
511 seq_printf(m, "Crypto Intr Enable:\t %08x\n",
512 I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
513 seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n",
514 I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
515
516 seq_printf(m, "Display Interrupt Control:\t%08x\n",
517 I915_READ(GEN11_DISPLAY_INT_CTL));
518
519 gen8_display_interrupt_info(m);
David Weinehall36cdd012016-08-22 13:59:31 +0300520 } else if (INTEL_GEN(dev_priv) >= 8) {
Ben Widawskya123f152013-11-02 21:07:10 -0700521 seq_printf(m, "Master Interrupt Control:\t%08x\n",
522 I915_READ(GEN8_MASTER_IRQ));
523
524 for (i = 0; i < 4; i++) {
525 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
526 i, I915_READ(GEN8_GT_IMR(i)));
527 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
528 i, I915_READ(GEN8_GT_IIR(i)));
529 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
530 i, I915_READ(GEN8_GT_IER(i)));
531 }
532
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200533 gen8_display_interrupt_info(m);
David Weinehall36cdd012016-08-22 13:59:31 +0300534 } else if (IS_VALLEYVIEW(dev_priv)) {
Jesse Barnes7e231dbe2012-03-28 13:39:38 -0700535 seq_printf(m, "Display IER:\t%08x\n",
536 I915_READ(VLV_IER));
537 seq_printf(m, "Display IIR:\t%08x\n",
538 I915_READ(VLV_IIR));
539 seq_printf(m, "Display IIR_RW:\t%08x\n",
540 I915_READ(VLV_IIR_RW));
541 seq_printf(m, "Display IMR:\t%08x\n",
542 I915_READ(VLV_IMR));
Chris Wilson4f4631a2017-02-10 13:36:32 +0000543 for_each_pipe(dev_priv, pipe) {
544 enum intel_display_power_domain power_domain;
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000545 intel_wakeref_t pref;
Chris Wilson4f4631a2017-02-10 13:36:32 +0000546
547 power_domain = POWER_DOMAIN_PIPE(pipe);
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000548 pref = intel_display_power_get_if_enabled(dev_priv,
549 power_domain);
550 if (!pref) {
Chris Wilson4f4631a2017-02-10 13:36:32 +0000551 seq_printf(m, "Pipe %c power disabled\n",
552 pipe_name(pipe));
553 continue;
554 }
555
Jesse Barnes7e231dbe2012-03-28 13:39:38 -0700556 seq_printf(m, "Pipe %c stat:\t%08x\n",
557 pipe_name(pipe),
558 I915_READ(PIPESTAT(pipe)));
Chris Wilson0e6e0be2019-01-14 14:21:24 +0000559 intel_display_power_put(dev_priv, power_domain, pref);
Chris Wilson4f4631a2017-02-10 13:36:32 +0000560 }
Jesse Barnes7e231dbe2012-03-28 13:39:38 -0700561
562 seq_printf(m, "Master IER:\t%08x\n",
563 I915_READ(VLV_MASTER_IER));
564
565 seq_printf(m, "Render IER:\t%08x\n",
566 I915_READ(GTIER));
567 seq_printf(m, "Render IIR:\t%08x\n",
568 I915_READ(GTIIR));
569 seq_printf(m, "Render IMR:\t%08x\n",
570 I915_READ(GTIMR));
571
572 seq_printf(m, "PM IER:\t\t%08x\n",
573 I915_READ(GEN6_PMIER));
574 seq_printf(m, "PM IIR:\t\t%08x\n",
575 I915_READ(GEN6_PMIIR));
576 seq_printf(m, "PM IMR:\t\t%08x\n",
577 I915_READ(GEN6_PMIMR));
578
579 seq_printf(m, "Port hotplug:\t%08x\n",
580 I915_READ(PORT_HOTPLUG_EN));
581 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
582 I915_READ(VLV_DPFLIPSTAT));
583 seq_printf(m, "DPINVGTT:\t%08x\n",
584 I915_READ(DPINVGTT));
585
David Weinehall36cdd012016-08-22 13:59:31 +0300586 } else if (!HAS_PCH_SPLIT(dev_priv)) {
Zhenyu Wang5f6a1692009-08-10 21:37:24 +0800587 seq_printf(m, "Interrupt enable: %08x\n",
Paulo Zanoni9d9523d2019-04-10 16:53:42 -0700588 I915_READ(GEN2_IER));
Zhenyu Wang5f6a1692009-08-10 21:37:24 +0800589 seq_printf(m, "Interrupt identity: %08x\n",
Paulo Zanoni9d9523d2019-04-10 16:53:42 -0700590 I915_READ(GEN2_IIR));
Zhenyu Wang5f6a1692009-08-10 21:37:24 +0800591 seq_printf(m, "Interrupt mask: %08x\n",
Paulo Zanoni9d9523d2019-04-10 16:53:42 -0700592 I915_READ(GEN2_IMR));
Damien Lespiau055e3932014-08-18 13:49:10 +0100593 for_each_pipe(dev_priv, pipe)
Jesse Barnes9db4a9c2011-02-07 12:26:52 -0800594 seq_printf(m, "Pipe %c stat: %08x\n",
595 pipe_name(pipe),
596 I915_READ(PIPESTAT(pipe)));
Zhenyu Wang5f6a1692009-08-10 21:37:24 +0800597 } else {
598 seq_printf(m, "North Display Interrupt enable: %08x\n",
599 I915_READ(DEIER));
600 seq_printf(m, "North Display Interrupt identity: %08x\n",
601 I915_READ(DEIIR));
602 seq_printf(m, "North Display Interrupt mask: %08x\n",
603 I915_READ(DEIMR));
604 seq_printf(m, "South Display Interrupt enable: %08x\n",
605 I915_READ(SDEIER));
606 seq_printf(m, "South Display Interrupt identity: %08x\n",
607 I915_READ(SDEIIR));
608 seq_printf(m, "South Display Interrupt mask: %08x\n",
609 I915_READ(SDEIMR));
610 seq_printf(m, "Graphics Interrupt enable: %08x\n",
611 I915_READ(GTIER));
612 seq_printf(m, "Graphics Interrupt identity: %08x\n",
613 I915_READ(GTIIR));
614 seq_printf(m, "Graphics Interrupt mask: %08x\n",
615 I915_READ(GTIMR));
616 }
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200617
618 if (INTEL_GEN(dev_priv) >= 11) {
619 seq_printf(m, "RCS Intr Mask:\t %08x\n",
620 I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
621 seq_printf(m, "BCS Intr Mask:\t %08x\n",
622 I915_READ(GEN11_BCS_RSVD_INTR_MASK));
623 seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
624 I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
625 seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
626 I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
627 seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
628 I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
629 seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
630 I915_READ(GEN11_GUC_SG_INTR_MASK));
631 seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
632 I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
633 seq_printf(m, "Crypto Intr Mask:\t %08x\n",
634 I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
635 seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
636 I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
637
638 } else if (INTEL_GEN(dev_priv) >= 6) {
Chris Wilson750e76b2019-08-06 13:43:00 +0100639 for_each_uabi_engine(engine, dev_priv) {
Chris Wilsona2c7f6f2012-09-01 20:51:22 +0100640 seq_printf(m,
641 "Graphics Interrupt mask (%s): %08x\n",
Daniele Ceraolo Spuriobaba6e52019-03-25 14:49:40 -0700642 engine->name, ENGINE_READ(engine, RING_IMR));
Chris Wilson9862e602011-01-04 22:22:17 +0000643 }
Chris Wilson9862e602011-01-04 22:22:17 +0000644 }
Tvrtko Ursulin80d89352018-02-20 17:37:53 +0200645
Daniele Ceraolo Spuriod858d562019-06-13 16:21:54 -0700646 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
Chris Wilsonde227ef2010-07-03 07:58:38 +0100647
Ben Gamari20172632009-02-17 20:08:50 -0500648 return 0;
649}
650
Chris Wilsona6172a82009-02-11 14:26:38 +0000651static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
652{
Chris Wilson0cf289b2019-06-13 08:32:54 +0100653 struct drm_i915_private *i915 = node_to_i915(m->private);
654 unsigned int i;
Chris Wilsonde227ef2010-07-03 07:58:38 +0100655
Chris Wilson0cf289b2019-06-13 08:32:54 +0100656 seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences);
Chris Wilsona6172a82009-02-11 14:26:38 +0000657
Chris Wilson0cf289b2019-06-13 08:32:54 +0100658 rcu_read_lock();
659 for (i = 0; i < i915->ggtt.num_fences; i++) {
660 struct i915_vma *vma = i915->ggtt.fence_regs[i].vma;
Chris Wilsona6172a82009-02-11 14:26:38 +0000661
Chris Wilson6c085a72012-08-20 11:40:46 +0200662 seq_printf(m, "Fence %d, pin count = %d, object = ",
Chris Wilson0cf289b2019-06-13 08:32:54 +0100663 i, i915->ggtt.fence_regs[i].pin_count);
Chris Wilson49ef5292016-08-18 17:17:00 +0100664 if (!vma)
Damien Lespiau267f0c92013-06-24 22:59:48 +0100665 seq_puts(m, "unused");
Chris Wilsonc2c347a92010-10-27 15:11:53 +0100666 else
Chris Wilson49ef5292016-08-18 17:17:00 +0100667 describe_obj(m, vma->obj);
Damien Lespiau267f0c92013-06-24 22:59:48 +0100668 seq_putc(m, '\n');
Chris Wilsona6172a82009-02-11 14:26:38 +0000669 }
Chris Wilson0cf289b2019-06-13 08:32:54 +0100670 rcu_read_unlock();
Chris Wilsona6172a82009-02-11 14:26:38 +0000671
672 return 0;
673}
674
Chris Wilson98a2f412016-10-12 10:05:18 +0100675#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000676static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
677 size_t count, loff_t *pos)
678{
Chris Wilson0e390372018-11-23 13:23:25 +0000679 struct i915_gpu_state *error;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000680 ssize_t ret;
Chris Wilson0e390372018-11-23 13:23:25 +0000681 void *buf;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000682
Chris Wilson0e390372018-11-23 13:23:25 +0000683 error = file->private_data;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000684 if (!error)
685 return 0;
686
Chris Wilson0e390372018-11-23 13:23:25 +0000687 /* Bounce buffer required because of kernfs __user API convenience. */
688 buf = kmalloc(count, GFP_KERNEL);
689 if (!buf)
690 return -ENOMEM;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000691
Chris Wilson0e390372018-11-23 13:23:25 +0000692 ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
693 if (ret <= 0)
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000694 goto out;
695
Chris Wilson0e390372018-11-23 13:23:25 +0000696 if (!copy_to_user(ubuf, buf, ret))
697 *pos += ret;
698 else
699 ret = -EFAULT;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000700
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000701out:
Chris Wilson0e390372018-11-23 13:23:25 +0000702 kfree(buf);
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000703 return ret;
704}
705
706static int gpu_state_release(struct inode *inode, struct file *file)
707{
708 i915_gpu_state_put(file->private_data);
709 return 0;
710}
711
712static int i915_gpu_info_open(struct inode *inode, struct file *file)
713{
Chris Wilson090e5fe2017-03-28 14:14:07 +0100714 struct drm_i915_private *i915 = inode->i_private;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000715 struct i915_gpu_state *gpu;
Chris Wilsona0371212019-01-14 14:21:14 +0000716 intel_wakeref_t wakeref;
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000717
Chris Wilsond4225a52019-01-14 14:21:23 +0000718 gpu = NULL;
Daniele Ceraolo Spurioc447ff72019-06-13 16:21:55 -0700719 with_intel_runtime_pm(&i915->runtime_pm, wakeref)
Chris Wilsond4225a52019-01-14 14:21:23 +0000720 gpu = i915_capture_gpu_state(i915);
Chris Wilsone6154e42018-12-07 11:05:54 +0000721 if (IS_ERR(gpu))
722 return PTR_ERR(gpu);
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000723
724 file->private_data = gpu;
725 return 0;
726}
727
/*
 * i915_gpu_info: open() captures a snapshot of the current GPU state;
 * read() streams the textual dump; release() drops the snapshot reference.
 */
static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
Chris Wilson98a2f412016-10-12 10:05:18 +0100735
Daniel Vetterd5442302012-04-27 15:17:40 +0200736static ssize_t
737i915_error_state_write(struct file *filp,
738 const char __user *ubuf,
739 size_t cnt,
740 loff_t *ppos)
741{
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000742 struct i915_gpu_state *error = filp->private_data;
743
744 if (!error)
745 return 0;
Daniel Vetterd5442302012-04-27 15:17:40 +0200746
747 DRM_DEBUG_DRIVER("Resetting error state\n");
Chris Wilson5a4c6f12017-02-14 16:46:11 +0000748 i915_reset_error_state(error->i915);
Daniel Vetterd5442302012-04-27 15:17:40 +0200749
750 return cnt;
751}
752
753static int i915_error_state_open(struct inode *inode, struct file *file)
754{
Chris Wilsone6154e42018-12-07 11:05:54 +0000755 struct i915_gpu_state *error;
756
757 error = i915_first_error_state(inode->i_private);
758 if (IS_ERR(error))
759 return PTR_ERR(error);
760
761 file->private_data = error;
Mika Kuoppalaedc3d882013-05-23 13:55:35 +0300762 return 0;
Daniel Vetterd5442302012-04-27 15:17:40 +0200763}
764
/*
 * i915_error_state: read() streams the recorded GPU error state (if any),
 * write() clears it, release() drops the reference taken at open().
 */
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
Chris Wilson98a2f412016-10-12 10:05:18 +0100773#endif
774
/*
 * debugfs: report GPU frequency/p-state information, branching on the
 * hardware generation (ILK, VLV/CHV, gen6+), followed by the CD/pixel
 * clock limits. Holds a runtime-pm wakeref for the whole dump.
 */
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uncore *uncore = &dev_priv->uncore;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	intel_wakeref_t wakeref;
	int ret = 0;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_GEN(dev_priv, 5)) {
		/* Ironlake: decode the MEMSWCTL/MEMSTAT p-state registers. */
		u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
		u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* VLV/CHV: frequencies come from the punit sideband. */
		u32 rpmodectl, freq_sts;

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		/* Sideband access requires the punit lock. */
		vlv_punit_get(dev_priv);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		vlv_punit_put(dev_priv);

		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		/* gen6+: dump the RPS state machine and interrupt registers. */
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		if (IS_GEN9_LP(dev_priv)) {
			/* Broxton/Geminilake use their own cap registers. */
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

		/* The requested-frequency field moved between generations. */
		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9)
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		cagf = intel_gpu_freq(dev_priv,
				      intel_get_cagf(dev_priv, rpstat));

		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		if (INTEL_GEN(dev_priv) >= 11) {
			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
			/*
			 * The equivalent to the PM ISR & IIR cannot be read
			 * without affecting the current state of the system
			 */
			pm_isr = 0;
			pm_iir = 0;
		} else if (INTEL_GEN(dev_priv) >= 8) {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
		} else {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
		}
		pm_mask = I915_READ(GEN6_PMINTRMSK);

		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_mask);
		/* ISR/IIR were zeroed above on gen11+; don't print them. */
		if (INTEL_GEN(dev_priv) <= 10)
			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
				   pm_isr, pm_iir);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   rps->power.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   rps->power.down_threshold);

		/*
		 * RP_STATE_CAP packs RPN/RP1/RP0 ratios; field order is
		 * swapped on GEN9_LP, and gen9+/gen10 ratios are scaled.
		 */
		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return ret;
}
998
Ben Widawskyd6369512016-09-20 16:54:32 +0300999static void i915_instdone_info(struct drm_i915_private *dev_priv,
1000 struct seq_file *m,
1001 struct intel_instdone *instdone)
1002{
Ben Widawskyf9e61372016-09-20 16:54:33 +03001003 int slice;
1004 int subslice;
1005
Ben Widawskyd6369512016-09-20 16:54:32 +03001006 seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1007 instdone->instdone);
1008
1009 if (INTEL_GEN(dev_priv) <= 3)
1010 return;
1011
1012 seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1013 instdone->slice_common);
1014
1015 if (INTEL_GEN(dev_priv) <= 6)
1016 return;
1017
Jani Nikulaa10f3612019-05-29 11:21:50 +03001018 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
Ben Widawskyf9e61372016-09-20 16:54:33 +03001019 seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1020 slice, subslice, instdone->sampler[slice][subslice]);
1021
Jani Nikulaa10f3612019-05-29 11:21:50 +03001022 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
Ben Widawskyf9e61372016-09-20 16:54:33 +03001023 seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1024 slice, subslice, instdone->row[slice][subslice]);
Ben Widawskyd6369512016-09-20 16:54:32 +03001025}
1026
/*
 * debugfs: report the hangcheck state — global reset flags, whether the
 * hangcheck worker is armed, and per-engine ACTHD/INSTDONE snapshots
 * (current vs accumulated) gathered under a runtime-pm wakeref.
 */
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_gt *gt = &i915->gt;
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	enum intel_engine_id id;

	seq_printf(m, "Reset flags: %lx\n", gt->reset.flags);
	if (test_bit(I915_WEDGED, &gt->reset.flags))
		seq_puts(m, "\tWedged\n");
	if (test_bit(I915_RESET_BACKOFF, &gt->reset.flags))
		seq_puts(m, "\tDevice (global) reset in progress\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	/* Distinguish timer-armed, work-queued, and idle hangcheck states. */
	if (timer_pending(&gt->hangcheck.work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(gt->hangcheck.work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&gt->hangcheck.work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(gt->awake));

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		for_each_engine(engine, i915, id) {
			struct intel_instdone instdone;

			seq_printf(m, "%s: %d ms ago\n",
				   engine->name,
				   jiffies_to_msecs(jiffies -
						    engine->hangcheck.action_timestamp));

			seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
				   (long long)engine->hangcheck.acthd,
				   intel_engine_get_active_head(engine));

			intel_engine_get_instdone(engine, &instdone);

			/* Freshly sampled INSTDONE ... */
			seq_puts(m, "\tinstdone read =\n");
			i915_instdone_info(i915, m, &instdone);

			/* ... vs the values hangcheck last accumulated. */
			seq_puts(m, "\tinstdone accu =\n");
			i915_instdone_info(i915, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}
1083
Ben Widawsky4d855292011-12-12 19:34:16 -08001084static int ironlake_drpc_info(struct seq_file *m)
Jesse Barnesf97108d2010-01-29 11:27:07 -08001085{
Tvrtko Ursulin4f5fd912019-06-11 11:45:48 +01001086 struct drm_i915_private *i915 = node_to_i915(m->private);
1087 struct intel_uncore *uncore = &i915->uncore;
Ben Widawsky616fdb52011-10-05 11:44:54 -07001088 u32 rgvmodectl, rstdbyctl;
1089 u16 crstandvid;
Ben Widawsky616fdb52011-10-05 11:44:54 -07001090
Tvrtko Ursulin4f5fd912019-06-11 11:45:48 +01001091 rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
1092 rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
1093 crstandvid = intel_uncore_read16(uncore, CRSTANDVID);
Ben Widawsky616fdb52011-10-05 11:44:54 -07001094
Jani Nikula742f4912015-09-03 11:16:09 +03001095 seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001096 seq_printf(m, "Boost freq: %d\n",
1097 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1098 MEMMODE_BOOST_FREQ_SHIFT);
1099 seq_printf(m, "HW control enabled: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001100 yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001101 seq_printf(m, "SW control enabled: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001102 yesno(rgvmodectl & MEMMODE_SWMODE_EN));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001103 seq_printf(m, "Gated voltage change: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001104 yesno(rgvmodectl & MEMMODE_RCLK_GATE));
Jesse Barnesf97108d2010-01-29 11:27:07 -08001105 seq_printf(m, "Starting frequency: P%d\n",
1106 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
Jesse Barnes7648fa92010-05-20 14:28:11 -07001107 seq_printf(m, "Max P-state: P%d\n",
Jesse Barnesf97108d2010-01-29 11:27:07 -08001108 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
Jesse Barnes7648fa92010-05-20 14:28:11 -07001109 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1110 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1111 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1112 seq_printf(m, "Render standby enabled: %s\n",
Jani Nikula742f4912015-09-03 11:16:09 +03001113 yesno(!(rstdbyctl & RCX_SW_EXIT)));
Damien Lespiau267f0c92013-06-24 22:59:48 +01001114 seq_puts(m, "Current RS state: ");
Jesse Barnes88271da2011-01-05 12:01:24 -08001115 switch (rstdbyctl & RSX_STATUS_MASK) {
1116 case RSX_STATUS_ON:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001117 seq_puts(m, "on\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001118 break;
1119 case RSX_STATUS_RC1:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001120 seq_puts(m, "RC1\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001121 break;
1122 case RSX_STATUS_RC1E:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001123 seq_puts(m, "RC1E\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001124 break;
1125 case RSX_STATUS_RS1:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001126 seq_puts(m, "RS1\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001127 break;
1128 case RSX_STATUS_RS2:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001129 seq_puts(m, "RS2 (RC6)\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001130 break;
1131 case RSX_STATUS_RS3:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001132 seq_puts(m, "RC3 (RC6+)\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001133 break;
1134 default:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001135 seq_puts(m, "unknown\n");
Jesse Barnes88271da2011-01-05 12:01:24 -08001136 break;
1137 }
Jesse Barnesf97108d2010-01-29 11:27:07 -08001138
1139 return 0;
1140}
1141
Mika Kuoppalaf65367b2015-01-16 11:34:42 +02001142static int i915_forcewake_domains(struct seq_file *m, void *data)
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001143{
Chris Wilson233ebf52017-03-23 10:19:44 +00001144 struct drm_i915_private *i915 = node_to_i915(m->private);
Daniele Ceraolo Spuriof568eee2019-03-19 11:35:35 -07001145 struct intel_uncore *uncore = &i915->uncore;
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001146 struct intel_uncore_forcewake_domain *fw_domain;
Chris Wilsond2dc94b2017-03-23 10:19:41 +00001147 unsigned int tmp;
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001148
Chris Wilsond7a133d2017-09-07 14:44:41 +01001149 seq_printf(m, "user.bypass_count = %u\n",
Daniele Ceraolo Spurio0a9b2632019-08-09 07:31:16 +01001150 uncore->user_forcewake_count);
Chris Wilsond7a133d2017-09-07 14:44:41 +01001151
Daniele Ceraolo Spuriof568eee2019-03-19 11:35:35 -07001152 for_each_fw_domain(fw_domain, uncore, tmp)
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001153 seq_printf(m, "%s.wake_count = %u\n",
Tvrtko Ursulin33c582c2016-04-07 17:04:33 +01001154 intel_uncore_forcewake_domain_to_str(fw_domain->id),
Chris Wilson233ebf52017-03-23 10:19:44 +00001155 READ_ONCE(fw_domain->wake_count));
Chris Wilsonb2cff0d2015-01-16 11:34:37 +02001156
1157 return 0;
1158}
1159
/*
 * Print an RC6 residency counter: the raw register value followed by its
 * conversion to microseconds. (I915_READ captures the local 'dev_priv'.)
 */
static void print_rc6_res(struct seq_file *m,
			  const char *title,
			  const i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "%s %u (%llu us)\n",
		   title, I915_READ(reg),
		   intel_rc6_residency_us(dev_priv, reg));
}
1170
/*
 * Valleyview/Cherryview DRPC: report RC6 enablement, the render/media
 * power-well status bits, and RC6 residency counters, then append the
 * forcewake-domain listing.
 */
static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rcctl1, pw_status;

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

	/* Tail-call the forcewake dump so both appear in one node. */
	return i915_forcewake_domains(m, NULL);
}
1192
Ben Widawsky4d855292011-12-12 19:34:16 -08001193static int gen6_drpc_info(struct seq_file *m)
1194{
David Weinehall36cdd012016-08-22 13:59:31 +03001195 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Sagar Arun Kamble960e5462017-10-10 22:29:59 +01001196 u32 gt_core_status, rcctl1, rc6vids = 0;
Akash Goelf2dd7572016-06-27 20:10:01 +05301197 u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
Ben Widawsky4d855292011-12-12 19:34:16 -08001198
Ville Syrjälä75aa3f62015-10-22 15:34:56 +03001199 gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
Chris Wilsoned71f1b2013-07-19 20:36:56 +01001200 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
Ben Widawsky4d855292011-12-12 19:34:16 -08001201
Ben Widawsky4d855292011-12-12 19:34:16 -08001202 rcctl1 = I915_READ(GEN6_RC_CONTROL);
David Weinehall36cdd012016-08-22 13:59:31 +03001203 if (INTEL_GEN(dev_priv) >= 9) {
Akash Goelf2dd7572016-06-27 20:10:01 +05301204 gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
1205 gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
1206 }
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001207
Chris Wilsonebb5eb72019-04-26 09:17:21 +01001208 if (INTEL_GEN(dev_priv) <= 7)
Imre Deak51cc9ad2018-02-08 19:41:02 +02001209 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
Ville Syrjäläd284d512019-05-21 19:40:24 +03001210 &rc6vids, NULL);
Ben Widawsky4d855292011-12-12 19:34:16 -08001211
Eric Anholtfff24e22012-01-23 16:14:05 -08001212 seq_printf(m, "RC1e Enabled: %s\n",
Ben Widawsky4d855292011-12-12 19:34:16 -08001213 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1214 seq_printf(m, "RC6 Enabled: %s\n",
1215 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
David Weinehall36cdd012016-08-22 13:59:31 +03001216 if (INTEL_GEN(dev_priv) >= 9) {
Akash Goelf2dd7572016-06-27 20:10:01 +05301217 seq_printf(m, "Render Well Gating Enabled: %s\n",
1218 yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
1219 seq_printf(m, "Media Well Gating Enabled: %s\n",
1220 yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
1221 }
Ben Widawsky4d855292011-12-12 19:34:16 -08001222 seq_printf(m, "Deep RC6 Enabled: %s\n",
1223 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1224 seq_printf(m, "Deepest RC6 Enabled: %s\n",
1225 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
Damien Lespiau267f0c92013-06-24 22:59:48 +01001226 seq_puts(m, "Current RC state: ");
Ben Widawsky4d855292011-12-12 19:34:16 -08001227 switch (gt_core_status & GEN6_RCn_MASK) {
1228 case GEN6_RC0:
1229 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
Damien Lespiau267f0c92013-06-24 22:59:48 +01001230 seq_puts(m, "Core Power Down\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001231 else
Damien Lespiau267f0c92013-06-24 22:59:48 +01001232 seq_puts(m, "on\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001233 break;
1234 case GEN6_RC3:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001235 seq_puts(m, "RC3\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001236 break;
1237 case GEN6_RC6:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001238 seq_puts(m, "RC6\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001239 break;
1240 case GEN6_RC7:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001241 seq_puts(m, "RC7\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001242 break;
1243 default:
Damien Lespiau267f0c92013-06-24 22:59:48 +01001244 seq_puts(m, "Unknown\n");
Ben Widawsky4d855292011-12-12 19:34:16 -08001245 break;
1246 }
1247
1248 seq_printf(m, "Core Power Down: %s\n",
1249 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
David Weinehall36cdd012016-08-22 13:59:31 +03001250 if (INTEL_GEN(dev_priv) >= 9) {
Akash Goelf2dd7572016-06-27 20:10:01 +05301251 seq_printf(m, "Render Power Well: %s\n",
1252 (gen9_powergate_status &
1253 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
1254 seq_printf(m, "Media Power Well: %s\n",
1255 (gen9_powergate_status &
1256 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
1257 }
Ben Widawskycce66a22012-03-27 18:59:38 -07001258
1259 /* Not exactly sure what this is */
Mika Kuoppala13628772017-03-15 17:43:02 +02001260 print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
1261 GEN6_GT_GFX_RC6_LOCKED);
1262 print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
1263 print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
1264 print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
Ben Widawskycce66a22012-03-27 18:59:38 -07001265
Imre Deak51cc9ad2018-02-08 19:41:02 +02001266 if (INTEL_GEN(dev_priv) <= 7) {
1267 seq_printf(m, "RC6 voltage: %dmV\n",
1268 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1269 seq_printf(m, "RC6+ voltage: %dmV\n",
1270 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1271 seq_printf(m, "RC6++ voltage: %dmV\n",
1272 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1273 }
1274
Akash Goelf2dd7572016-06-27 20:10:01 +05301275 return i915_forcewake_domains(m, NULL);
Ben Widawsky4d855292011-12-12 19:34:16 -08001276}
1277
1278static int i915_drpc_info(struct seq_file *m, void *unused)
1279{
David Weinehall36cdd012016-08-22 13:59:31 +03001280 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Chris Wilsona0371212019-01-14 14:21:14 +00001281 intel_wakeref_t wakeref;
Chris Wilsond4225a52019-01-14 14:21:23 +00001282 int err = -ENODEV;
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001283
Daniele Ceraolo Spurioc447ff72019-06-13 16:21:55 -07001284 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
Chris Wilsond4225a52019-01-14 14:21:23 +00001285 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1286 err = vlv_drpc_info(m);
1287 else if (INTEL_GEN(dev_priv) >= 6)
1288 err = gen6_drpc_info(m);
1289 else
1290 err = ironlake_drpc_info(m);
1291 }
Chris Wilsoncf632bd2017-03-13 09:56:17 +00001292
1293 return err;
Ben Widawsky4d855292011-12-12 19:34:16 -08001294}
1295
Daniel Vetter9a851782015-06-18 10:30:22 +02001296static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1297{
David Weinehall36cdd012016-08-22 13:59:31 +03001298 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Daniel Vetter9a851782015-06-18 10:30:22 +02001299
1300 seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1301 dev_priv->fb_tracking.busy_bits);
1302
1303 seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1304 dev_priv->fb_tracking.flip_bits);
1305
1306 return 0;
1307}
1308
/*
 * Dump framebuffer-compression (FBC) state: whether FBC is active (and
 * if not, why), and whether the hardware is currently compressing.
 *
 * Takes a runtime-PM wakeref (register reads) and fbc->lock (consistent
 * view of the software FBC state) for the duration of the dump.
 */
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_fbc *fbc = &dev_priv->fbc;
	intel_wakeref_t wakeref;

	if (!HAS_FBC(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	mutex_lock(&fbc->lock);

	if (intel_fbc_is_active(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);

	if (intel_fbc_is_active(dev_priv)) {
		u32 mask;

		/* The compressed-segment status register moved and was
		 * re-masked across generations; pick the right one. */
		if (INTEL_GEN(dev_priv) >= 8)
			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 7)
			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 5)
			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
		else if (IS_G4X(dev_priv))
			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
		else
			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
							FBC_STAT_COMPRESSED);

		/* Non-zero mask means at least one segment is compressed. */
		seq_printf(m, "Compressing: %s\n", yesno(mask));
	}

	mutex_unlock(&fbc->lock);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
1349
Ville Syrjälä4127dc42017-06-06 15:44:12 +03001350static int i915_fbc_false_color_get(void *data, u64 *val)
Rodrigo Vivida46f932014-08-01 02:04:45 -07001351{
David Weinehall36cdd012016-08-22 13:59:31 +03001352 struct drm_i915_private *dev_priv = data;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001353
David Weinehall36cdd012016-08-22 13:59:31 +03001354 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
Rodrigo Vivida46f932014-08-01 02:04:45 -07001355 return -ENODEV;
1356
Rodrigo Vivida46f932014-08-01 02:04:45 -07001357 *val = dev_priv->fbc.false_color;
Rodrigo Vivida46f932014-08-01 02:04:45 -07001358
1359 return 0;
1360}
1361
/*
 * debugfs setter: toggle FBC false-colour debug mode.
 *
 * Performs a read-modify-write of ILK_DPFC_CONTROL under fbc.lock so the
 * control bit flips without disturbing the rest of the register, and
 * caches the requested state in dev_priv->fbc.false_color.
 */
static int i915_fbc_false_color_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 reg;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	mutex_lock(&dev_priv->fbc.lock);

	reg = I915_READ(ILK_DPFC_CONTROL);
	dev_priv->fbc.false_color = val;

	/* Set or clear only the false-colour bit; preserve other bits. */
	I915_WRITE(ILK_DPFC_CONTROL, val ?
		   (reg | FBC_CTL_FALSE_COLOR) :
		   (reg & ~FBC_CTL_FALSE_COLOR));

	mutex_unlock(&dev_priv->fbc.lock);
	return 0;
}
1382
/* debugfs fops binding the FBC false-colour get/set helpers, "%llu" format. */
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");
1386
/*
 * Dump Intermediate Pixel Storage (IPS) status: the module-parameter
 * policy and, where readable, the current hardware state.
 */
static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	if (!HAS_IPS(dev_priv))
		return -ENODEV;

	/* Hold a wakeref across the IPS_CTL read below. */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(i915_modparams.enable_ips));

	if (INTEL_GEN(dev_priv) >= 8) {
		/* Gen8+ offers no CPU-visible IPS status register. */
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (I915_READ(IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
1413
/*
 * Report whether panel self-refresh is enabled, probing the
 * platform-appropriate status register. Display power (POWER_DOMAIN_INIT)
 * is held across the register reads.
 */
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	bool sr_enabled = false;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
	/* Platforms falling through every branch report "disabled". */

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
1442
/*
 * Print the GPU-to-CPU/ring frequency mapping table maintained by the
 * pcode firmware, one row per GPU frequency step between the RPS
 * hard minimum and maximum.
 *
 * Only meaningful on parts with a shared last-level cache (HAS_LLC).
 */
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	intel_wakeref_t wakeref;
	int gpu_freq, ia_freq;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	/* Wakeref needed for the pcode mailbox reads in the loop. */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		ia_freq = gpu_freq;
		/* pcode returns CPU (low byte) and ring (high byte)
		 * frequencies in 100 MHz units for this GPU step. */
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq, NULL);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ||
						      INTEL_GEN(dev_priv) >= 10 ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
1482
Chris Wilson44834a62010-08-19 16:09:23 +01001483static int i915_opregion(struct seq_file *m, void *unused)
1484{
David Weinehall36cdd012016-08-22 13:59:31 +03001485 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1486 struct drm_device *dev = &dev_priv->drm;
Chris Wilson44834a62010-08-19 16:09:23 +01001487 struct intel_opregion *opregion = &dev_priv->opregion;
1488 int ret;
1489
1490 ret = mutex_lock_interruptible(&dev->struct_mutex);
1491 if (ret)
Daniel Vetter0d38f002012-04-21 22:49:10 +02001492 goto out;
Chris Wilson44834a62010-08-19 16:09:23 +01001493
Jani Nikula2455a8e2015-12-14 12:50:53 +02001494 if (opregion->header)
1495 seq_write(m, opregion->header, OPREGION_SIZE);
Chris Wilson44834a62010-08-19 16:09:23 +01001496
1497 mutex_unlock(&dev->struct_mutex);
1498
Daniel Vetter0d38f002012-04-21 22:49:10 +02001499out:
Chris Wilson44834a62010-08-19 16:09:23 +01001500 return 0;
1501}
1502
Jani Nikulaada8f952015-12-15 13:17:12 +02001503static int i915_vbt(struct seq_file *m, void *unused)
1504{
David Weinehall36cdd012016-08-22 13:59:31 +03001505 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
Jani Nikulaada8f952015-12-15 13:17:12 +02001506
1507 if (opregion->vbt)
1508 seq_write(m, opregion->vbt, opregion->vbt_size);
1509
1510 return 0;
1511}
1512
/*
 * List every framebuffer known to the device: the fbdev console
 * framebuffer first (when fbdev emulation is built in), then all
 * user-created framebuffers, with geometry, format and backing-object
 * details for each.
 *
 * Holds struct_mutex for describe_obj() and mode_config.fb_lock while
 * walking the framebuffer list.
 */
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
		seq_putc(m, '\n');
	}
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
		/* Skip the fbdev framebuffer; it was printed above. */
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, intel_fb_obj(&fb->base));
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1562
Chris Wilson7e37f882016-08-02 22:50:21 +01001563static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001564{
Chris Wilsonef5032a2018-03-07 13:42:24 +00001565 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1566 ring->space, ring->head, ring->tail, ring->emit);
Oscar Mateoc9fe99b2014-07-24 17:04:46 +01001567}
1568
/*
 * Walk every GEM context and print its HW id, owning process (or
 * kernel/deleted markers), remap-slice flag, and — for each pinned
 * engine context — the backing state object and ring bookkeeping.
 *
 * struct_mutex guards the context list walk; each context's engine
 * list is locked via i915_gem_context_lock_engines(), and each
 * intel_context is pin-locked before its state is inspected.
 */
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_gem_context *ctx;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		seq_puts(m, "HW context ");
		/* Only contexts that currently hold a HW id are listed. */
		if (!list_empty(&ctx->hw_id_link))
			seq_printf(m, "%x [pin %u]", ctx->hw_id,
				   atomic_read(&ctx->hw_id_pin_count));
		if (ctx->pid) {
			struct task_struct *task;

			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		/* 'R' = context still needs slice remapping, 'r' = done. */
		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			intel_context_lock_pinned(ce);
			if (intel_context_is_pinned(ce)) {
				seq_printf(m, "%s: ", ce->engine->name);
				if (ce->state)
					describe_obj(m, ce->state->obj);
				describe_ctx_ring(m, ce->ring);
				seq_putc(m, '\n');
			}
			intel_context_unlock_pinned(ce);
		}
		i915_gem_context_unlock_engines(ctx);

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1627
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001628static const char *swizzle_string(unsigned swizzle)
1629{
Damien Lespiauaee56cf2013-06-24 22:59:49 +01001630 switch (swizzle) {
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001631 case I915_BIT_6_SWIZZLE_NONE:
1632 return "none";
1633 case I915_BIT_6_SWIZZLE_9:
1634 return "bit9";
1635 case I915_BIT_6_SWIZZLE_9_10:
1636 return "bit9/bit10";
1637 case I915_BIT_6_SWIZZLE_9_11:
1638 return "bit9/bit11";
1639 case I915_BIT_6_SWIZZLE_9_10_11:
1640 return "bit9/bit10/bit11";
1641 case I915_BIT_6_SWIZZLE_9_17:
1642 return "bit9/bit17";
1643 case I915_BIT_6_SWIZZLE_9_10_17:
1644 return "bit9/bit10/bit17";
1645 case I915_BIT_6_SWIZZLE_UNKNOWN:
Masanari Iida8a168ca2012-12-29 02:00:09 +09001646 return "unknown";
Daniel Vetterea16a3c2011-12-14 13:57:16 +01001647 }
1648
1649 return "bug";
1650}
1651
/*
 * Dump the detected bit-6 swizzle modes for X/Y tiling plus the raw
 * memory-channel configuration registers they were derived from
 * (register set varies by generation). Runtime PM is held across the
 * register reads.
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uncore *uncore = &dev_priv->uncore;
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		/* Gen3/4: DRAM channel config lives in DCC/C?DRB3. */
		seq_printf(m, "DDC = 0x%08x\n",
			   intel_uncore_read(uncore, DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   intel_uncore_read(uncore, DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		/* Gen6+: per-channel DIMM config plus arbiter controls. */
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   intel_uncore_read(uncore, TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   intel_uncore_read(uncore, GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   intel_uncore_read(uncore, ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   intel_uncore_read(uncore, DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
1700
Chris Wilson7466c292016-08-15 09:49:33 +01001701static const char *rps_power_to_str(unsigned int power)
1702{
1703 static const char * const strings[] = {
1704 [LOW_POWER] = "low power",
1705 [BETWEEN] = "mixed",
1706 [HIGH_POWER] = "high power",
1707 };
1708
1709 if (power >= ARRAY_SIZE(strings) || !strings[power])
1710 return "unknown";
1711
1712 return strings[power];
1713}
1714
/*
 * Dump RPS (render P-state) boost status: requested vs. actual GPU
 * frequency, the soft/hard frequency limits, outstanding waitboosts,
 * and — while the GT is awake — the autotuning up/down averages.
 *
 * The actual frequency is only sampled if a runtime-PM reference is
 * already held elsewhere (with_intel_runtime_pm_if_in_use), so this
 * dump never wakes the device by itself.
 */
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 act_freq = rps->cur_freq;
	intel_wakeref_t wakeref;

	with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			/* VLV/CHV report the live frequency via punit. */
			vlv_punit_get(dev_priv);
			act_freq = vlv_punit_read(dev_priv,
						  PUNIT_REG_GPU_FREQ_STS);
			vlv_punit_put(dev_priv);
			act_freq = (act_freq >> 8) & 0xff;
		} else {
			act_freq = intel_get_cagf(dev_priv,
						  I915_READ(GEN6_RPSTAT1));
		}
	}

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(dev_priv, rps->cur_freq),
		   intel_gpu_freq(dev_priv, act_freq));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, rps->min_freq),
		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, rps->idle_freq),
		   intel_gpu_freq(dev_priv, rps->efficient_freq),
		   intel_gpu_freq(dev_priv, rps->boost_freq));

	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));

	if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		/* Raw (_FW) reads bracketed by an explicit forcewake. */
		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
1780
Ben Widawsky63573eb2013-07-04 11:02:07 -07001781static int i915_llc(struct seq_file *m, void *data)
1782{
David Weinehall36cdd012016-08-22 13:59:31 +03001783 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Mika Kuoppala3accaf72016-04-13 17:26:43 +03001784 const bool edram = INTEL_GEN(dev_priv) > 8;
Ben Widawsky63573eb2013-07-04 11:02:07 -07001785
David Weinehall36cdd012016-08-22 13:59:31 +03001786 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
Daniele Ceraolo Spuriof6ac9932019-03-28 10:45:32 -07001787 seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
1788 dev_priv->edram_size_mb);
Ben Widawsky63573eb2013-07-04 11:02:07 -07001789
1790 return 0;
1791}
1792
/*
 * Dump HuC firmware load status: the cached firmware description plus
 * the live HUC_STATUS2 register (only read under runtime PM).
 */
static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct drm_printer p;

	if (!HAS_GT_UC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->gt.uc.huc.fw, &p);

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));

	return 0;
}
1810
/*
 * debugfs: dump the GuC firmware descriptor, then decode GUC_STATUS
 * (bootrom / uKernel / MIA core fields) and print the 16 soft scratch
 * registers while holding a runtime-PM wakeref.
 */
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct drm_printer p;

	/* GuC only exists on hardware with a GT microcontroller. */
	if (!HAS_GT_UC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->gt.uc.guc.fw, &p);

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		u32 tmp = I915_READ(GUC_STATUS);
		u32 i;

		seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
		seq_printf(m, "\tBootrom status = 0x%x\n",
			   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
		seq_printf(m, "\tuKernel status = 0x%x\n",
			   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
		seq_printf(m, "\tMIA Core status = 0x%x\n",
			   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
		seq_puts(m, "\nScratch registers:\n");
		for (i = 0; i < 16; i++) {
			seq_printf(m, "\t%2d: \t0x%x\n",
				   i, I915_READ(SOFT_SCRATCH(i)));
		}
	}

	return 0;
}
1843
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01001844static const char *
1845stringify_guc_log_type(enum guc_log_buffer_type type)
1846{
1847 switch (type) {
1848 case GUC_ISR_LOG_BUFFER:
1849 return "ISR";
1850 case GUC_DPC_LOG_BUFFER:
1851 return "DPC";
1852 case GUC_CRASH_DUMP_LOG_BUFFER:
1853 return "CRASH";
1854 default:
1855 MISSING_CASE(type);
1856 }
1857
1858 return "";
1859}
1860
Akash Goel5aa1ee42016-10-12 21:54:36 +05301861static void i915_guc_log_info(struct seq_file *m,
1862 struct drm_i915_private *dev_priv)
1863{
Daniele Ceraolo Spurio8b5689d2019-07-13 11:00:12 +01001864 struct intel_guc_log *log = &dev_priv->gt.uc.guc.log;
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01001865 enum guc_log_buffer_type type;
1866
1867 if (!intel_guc_log_relay_enabled(log)) {
1868 seq_puts(m, "GuC log relay disabled\n");
1869 return;
1870 }
Akash Goel5aa1ee42016-10-12 21:54:36 +05301871
Michał Winiarskidb557992018-03-19 10:53:43 +01001872 seq_puts(m, "GuC logging stats:\n");
Akash Goel5aa1ee42016-10-12 21:54:36 +05301873
Michał Winiarski6a96be22018-03-19 10:53:42 +01001874 seq_printf(m, "\tRelay full count: %u\n",
Michał Winiarski5e24e4a2018-03-19 10:53:44 +01001875 log->relay.full_count);
1876
1877 for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
1878 seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
1879 stringify_guc_log_type(type),
1880 log->stats[type].flush,
1881 log->stats[type].sampled_overflow);
1882 }
Akash Goel5aa1ee42016-10-12 21:54:36 +05301883}
1884
Dave Gordon8b417c22015-08-12 15:43:44 +01001885static void i915_guc_client_info(struct seq_file *m,
1886 struct drm_i915_private *dev_priv,
Sagar Arun Kamble5afc8b42017-11-16 19:02:40 +05301887 struct intel_guc_client *client)
Dave Gordon8b417c22015-08-12 15:43:44 +01001888{
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001889 struct intel_engine_cs *engine;
Jani Nikulae5315212019-01-16 11:15:23 +02001890 u64 tot = 0;
Dave Gordon8b417c22015-08-12 15:43:44 +01001891
Oscar Mateob09935a2017-03-22 10:39:53 -07001892 seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
1893 client->priority, client->stage_id, client->proc_desc_offset);
Michał Winiarski59db36c2017-09-14 12:51:23 +02001894 seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
1895 client->doorbell_id, client->doorbell_offset);
Dave Gordon8b417c22015-08-12 15:43:44 +01001896
Chris Wilson750e76b2019-08-06 13:43:00 +01001897 for_each_uabi_engine(engine, dev_priv) {
1898 u64 submissions = client->submissions[engine->guc_id];
Dave Gordonc18468c2016-08-09 15:19:22 +01001899 tot += submissions;
Dave Gordon8b417c22015-08-12 15:43:44 +01001900 seq_printf(m, "\tSubmissions: %llu %s\n",
Dave Gordonc18468c2016-08-09 15:19:22 +01001901 submissions, engine->name);
Dave Gordon8b417c22015-08-12 15:43:44 +01001902 }
1903 seq_printf(m, "\tTotal: %llu\n", tot);
1904}
1905
1906static int i915_guc_info(struct seq_file *m, void *data)
1907{
David Weinehall36cdd012016-08-22 13:59:31 +03001908 struct drm_i915_private *dev_priv = node_to_i915(m->private);
Daniele Ceraolo Spurio8b5689d2019-07-13 11:00:12 +01001909 const struct intel_guc *guc = &dev_priv->gt.uc.guc;
Dave Gordon8b417c22015-08-12 15:43:44 +01001910
Michał Winiarskidb557992018-03-19 10:53:43 +01001911 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00001912 return -ENODEV;
1913
Michał Winiarskidb557992018-03-19 10:53:43 +01001914 i915_guc_log_info(m, dev_priv);
1915
1916 if (!USES_GUC_SUBMISSION(dev_priv))
1917 return 0;
1918
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00001919 GEM_BUG_ON(!guc->execbuf_client);
Dave Gordon8b417c22015-08-12 15:43:44 +01001920
Michał Winiarskidb557992018-03-19 10:53:43 +01001921 seq_printf(m, "\nDoorbell map:\n");
Joonas Lahtinenabddffd2017-03-22 10:39:44 -07001922 seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
Michał Winiarskidb557992018-03-19 10:53:43 +01001923 seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
Dave Gordon9636f6d2016-06-13 17:57:28 +01001924
Chris Wilson334636c2016-11-29 12:10:20 +00001925 seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
1926 i915_guc_client_info(m, dev_priv, guc->execbuf_client);
Dave Gordon8b417c22015-08-12 15:43:44 +01001927
1928 /* Add more as required ... */
1929
1930 return 0;
1931}
1932
/*
 * debugfs: walk the GuC stage descriptor pool and dump every active
 * descriptor, including its per-engine execlist context entries.
 */
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->gt.uc.guc;
	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
	int index;

	if (!USES_GUC_SUBMISSION(dev_priv))
		return -ENODEV;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		/* Only active descriptors carry meaningful state. */
		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		/* One LRC slot per engine, indexed by the GuC engine id. */
		for_each_uabi_engine(engine, dev_priv) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
				&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}
1984
/*
 * debugfs: hexdump a GuC log buffer, four u32 words per output line.
 * The node's info_ent->data selects the source: non-NULL means the
 * firmware load-error log, NULL means the runtime log VMA (if any).
 */
static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	bool dump_load_err = !!node->info_ent->data;
	struct drm_i915_gem_object *obj = NULL;
	u32 *log;
	int i = 0;

	if (!HAS_GT_UC(dev_priv))
		return -ENODEV;

	if (dump_load_err)
		obj = dev_priv->gt.uc.load_err_log;
	else if (dev_priv->gt.uc.guc.log.vma)
		obj = dev_priv->gt.uc.guc.log.vma->obj;

	/* No log captured yet is not an error: just print nothing. */
	if (!obj)
		return 0;

	/* Map the object for CPU reads; unpinned again below. */
	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(log)) {
		DRM_DEBUG("Failed to pin object\n");
		seq_puts(m, "(log data unaccessible)\n");
		return PTR_ERR(log);
	}

	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			   *(log + i), *(log + i + 1),
			   *(log + i + 2), *(log + i + 3));

	seq_putc(m, '\n');

	i915_gem_object_unpin_map(obj);

	return 0;
}
2023
Michał Winiarski4977a282018-03-19 10:53:40 +01002024static int i915_guc_log_level_get(void *data, u64 *val)
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302025{
Chris Wilsonbcc36d82017-04-07 20:42:20 +01002026 struct drm_i915_private *dev_priv = data;
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302027
Michał Winiarski86aa8242018-03-08 16:46:53 +01002028 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002029 return -ENODEV;
2030
Daniele Ceraolo Spurio8b5689d2019-07-13 11:00:12 +01002031 *val = intel_guc_log_get_level(&dev_priv->gt.uc.guc.log);
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302032
2033 return 0;
2034}
2035
Michał Winiarski4977a282018-03-19 10:53:40 +01002036static int i915_guc_log_level_set(void *data, u64 val)
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302037{
Chris Wilsonbcc36d82017-04-07 20:42:20 +01002038 struct drm_i915_private *dev_priv = data;
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302039
Michał Winiarski86aa8242018-03-08 16:46:53 +01002040 if (!USES_GUC(dev_priv))
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002041 return -ENODEV;
2042
Daniele Ceraolo Spurio8b5689d2019-07-13 11:00:12 +01002043 return intel_guc_log_set_level(&dev_priv->gt.uc.guc.log, val);
Sagar Arun Kamble685534e2016-10-12 21:54:41 +05302044}
2045
/* Signed 64-bit debugfs attribute wrapping the GuC log level get/set. */
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
			i915_guc_log_level_get, i915_guc_log_level_set,
			"%lld\n");
2049
Michał Winiarski4977a282018-03-19 10:53:40 +01002050static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2051{
2052 struct drm_i915_private *dev_priv = inode->i_private;
2053
2054 if (!USES_GUC(dev_priv))
2055 return -ENODEV;
2056
Daniele Ceraolo Spurio8b5689d2019-07-13 11:00:12 +01002057 file->private_data = &dev_priv->gt.uc.guc.log;
Michał Winiarski4977a282018-03-19 10:53:40 +01002058
Daniele Ceraolo Spurio8b5689d2019-07-13 11:00:12 +01002059 return intel_guc_log_relay_open(&dev_priv->gt.uc.guc.log);
Michał Winiarski4977a282018-03-19 10:53:40 +01002060}
2061
static ssize_t
i915_guc_log_relay_write(struct file *filp,
			 const char __user *ubuf,
			 size_t cnt,
			 loff_t *ppos)
{
	struct intel_guc_log *log = filp->private_data;

	/*
	 * Any write acts purely as a flush trigger for the relay; the
	 * user-supplied data itself is never read.
	 */
	intel_guc_log_relay_flush(log);

	/* Report the whole buffer as consumed so callers don't retry. */
	return cnt;
}
2074
2075static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2076{
2077 struct drm_i915_private *dev_priv = inode->i_private;
2078
Daniele Ceraolo Spurio8b5689d2019-07-13 11:00:12 +01002079 intel_guc_log_relay_close(&dev_priv->gt.uc.guc.log);
Michał Winiarski4977a282018-03-19 10:53:40 +01002080
2081 return 0;
2082}
2083
/*
 * File operations for the GuC log relay debugfs node: open/release manage
 * the relay lifetime, write forces a flush (data is ignored).
 */
static const struct file_operations i915_guc_log_relay_fops = {
	.owner = THIS_MODULE,
	.open = i915_guc_log_relay_open,
	.write = i915_guc_log_relay_write,
	.release = i915_guc_log_relay_release,
};
2090
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002091static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2092{
2093 u8 val;
2094 static const char * const sink_status[] = {
2095 "inactive",
2096 "transition to active, capture and display",
2097 "active, display from RFB",
2098 "active, capture and display on sink device timings",
2099 "transition to inactive, capture and display, timing re-sync",
2100 "reserved",
2101 "reserved",
2102 "sink internal error",
2103 };
2104 struct drm_connector *connector = m->private;
Rodrigo Vivi7a72c782018-07-19 17:31:55 -07002105 struct drm_i915_private *dev_priv = to_i915(connector->dev);
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002106 struct intel_dp *intel_dp =
2107 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
Rodrigo Vivi7a72c782018-07-19 17:31:55 -07002108 int ret;
2109
2110 if (!CAN_PSR(dev_priv)) {
2111 seq_puts(m, "PSR Unsupported\n");
2112 return -ENODEV;
2113 }
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002114
2115 if (connector->status != connector_status_connected)
2116 return -ENODEV;
2117
Rodrigo Vivi7a72c782018-07-19 17:31:55 -07002118 ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2119
2120 if (ret == 1) {
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002121 const char *str = "unknown";
2122
2123 val &= DP_PSR_SINK_STATE_MASK;
2124 if (val < ARRAY_SIZE(sink_status))
2125 str = sink_status[val];
2126 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2127 } else {
Rodrigo Vivi7a72c782018-07-19 17:31:55 -07002128 return ret;
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07002129 }
2130
2131 return 0;
2132}
2133DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2134
/*
 * Print the source (hardware) PSR state machine status.  PSR2 and PSR1
 * use different status registers (EDP_PSR2_STATUS vs EDP_PSR_STATUS) and
 * different state encodings, hence the two lookup tables.
 */
static void
psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
{
	u32 val, status_val;
	const char *status = "unknown";

	if (dev_priv->psr.psr2_enabled) {
		static const char * const live_status[] = {
			"IDLE",
			"CAPTURE",
			"CAPTURE_FS",
			"SLEEP",
			"BUFON_FW",
			"ML_UP",
			"SU_STANDBY",
			"FAST_SLEEP",
			"DEEP_SLEEP",
			"BUF_ON",
			"TG_ON"
		};
		val = I915_READ(EDP_PSR2_STATUS);
		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
			      EDP_PSR2_STATUS_STATE_SHIFT;
		/* Out-of-range states keep the "unknown" label. */
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	} else {
		static const char * const live_status[] = {
			"IDLE",
			"SRDONACK",
			"SRDENT",
			"BUFOFF",
			"BUFON",
			"AUXACK",
			"SRDOFFACK",
			"SRDENT_ON",
		};
		val = I915_READ(EDP_PSR_STATUS);
		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
			      EDP_PSR_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	}

	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
}
2180
/*
 * debugfs: full eDP PSR status report — sink support, enabled mode
 * (PSR1/PSR2), source control and status registers, busy frontbuffer
 * bits, performance counter, IRQ debug timestamps, and PSR2 selective
 * update block counts.  Register reads are done under a runtime-PM
 * wakeref and psr->lock.
 */
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_psr *psr = &dev_priv->psr;
	intel_wakeref_t wakeref;
	const char *status;
	bool enabled;
	u32 val;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
	if (psr->dp)
		seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
	seq_puts(m, "\n");

	/* Without sink support there is nothing further to report. */
	if (!psr->sink_support)
		return 0;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	mutex_lock(&psr->lock);

	if (psr->enabled)
		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
	else
		status = "disabled";
	seq_printf(m, "PSR mode: %s\n", status);

	if (!psr->enabled)
		goto unlock;

	/* PSR1 and PSR2 have separate control registers. */
	if (psr->psr2_enabled) {
		val = I915_READ(EDP_PSR2_CTL);
		enabled = val & EDP_PSR2_ENABLE;
	} else {
		val = I915_READ(EDP_PSR_CTL);
		enabled = val & EDP_PSR_ENABLE;
	}
	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
		   enableddisabled(enabled), val);
	psr_source_status(dev_priv, m);
	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
		   psr->busy_frontbuffer_bits);

	/*
	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
		seq_printf(m, "Performance counter: %u\n", val);
	}

	/* Entry/exit timestamps are only tracked with IRQ debug enabled. */
	if (psr->debug & I915_PSR_DEBUG_IRQ) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   psr->last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
	}

	if (psr->psr2_enabled) {
		u32 su_frames_val[3];
		int frame;

		/*
		 * Reading all 3 registers before hand to minimize crossing a
		 * frame boundary between register reads
		 */
		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
			su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));

		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");

		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
			u32 su_blocks;

			su_blocks = su_frames_val[frame / 3] &
				    PSR2_SU_STATUS_MASK(frame);
			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
			seq_printf(m, "%d\t%d\n", frame, su_blocks);
		}
	}

unlock:
	mutex_unlock(&psr->lock);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
2269
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002270static int
2271i915_edp_psr_debug_set(void *data, u64 val)
2272{
2273 struct drm_i915_private *dev_priv = data;
Chris Wilsona0371212019-01-14 14:21:14 +00002274 intel_wakeref_t wakeref;
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002275 int ret;
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002276
2277 if (!CAN_PSR(dev_priv))
2278 return -ENODEV;
2279
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002280 DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002281
Daniele Ceraolo Spuriod858d562019-06-13 16:21:54 -07002282 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002283
José Roberto de Souza23ec9f52019-02-06 13:18:45 -08002284 ret = intel_psr_debug_set(dev_priv, val);
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002285
Daniele Ceraolo Spuriod858d562019-06-13 16:21:54 -07002286 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002287
Maarten Lankhorstc44301f2018-08-09 16:21:01 +02002288 return ret;
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002289}
2290
2291static int
2292i915_edp_psr_debug_get(void *data, u64 *val)
2293{
2294 struct drm_i915_private *dev_priv = data;
2295
2296 if (!CAN_PSR(dev_priv))
2297 return -ENODEV;
2298
2299 *val = READ_ONCE(dev_priv->psr.debug);
2300 return 0;
2301}
2302
/* Unsigned 64-bit debugfs attribute wrapping the PSR debug get/set. */
DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");
2306
/*
 * debugfs: report GPU energy consumption in microjoules.  The energy-unit
 * exponent is taken from bits 12:8 of MSR_RAPL_POWER_UNIT; the raw energy
 * counter is read from MCH_SECP_NRG_STTS with the device awake.
 */
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	unsigned long long power;
	intel_wakeref_t wakeref;
	u32 units;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	/* MSR may be unavailable (e.g. in a VM); bail rather than fault. */
	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
		return -ENODEV;

	units = (power & 0x1f00) >> 8;
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		power = I915_READ(MCH_SECP_NRG_STTS);

	power = (1000000 * power) >> units; /* convert to uJ */
	seq_printf(m, "%llu", power);

	return 0;
}
2329
/*
 * debugfs: summarize runtime power management state — wakeref status, GT
 * awake/idle, IRQ enablement, PM usage count (when CONFIG_PM is built in)
 * and the PCI device power state.
 */
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/* Note: prints a warning but still reports the rest below. */
	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "Runtime power status: %s\n",
		   enableddisabled(!dev_priv->power_domains.wakeref));

	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	/* Optional wakeref tracking dump, compiled in via debug Kconfig. */
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
		struct drm_printer p = drm_seq_file_printer(m);

		print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p);
	}

	return 0;
}
2362
/*
 * debugfs: print every power well with its use count, and under each well
 * the per-domain use counts.  Iteration is protected by the power-domains
 * lock to keep the counts consistent.
 */
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", power_well->desc->name,
			   power_well->count);

		for_each_power_domain(power_domain, power_well->desc->domains)
			seq_printf(m, "  %-23s %d\n",
				   intel_display_power_domain_str(dev_priv,
								  power_domain),
				   power_domains->domain_use_count[power_domain]);
	}

	mutex_unlock(&power_domains->lock);

	return 0;
}
2391
/*
 * debugfs: report DMC (CSR) firmware state — load status, path, version,
 * and the platform-specific DC-state entry counters.  The DC6 counter
 * register is left zero-initialized (and skipped below) on platforms that
 * do not expose it.
 */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct intel_csr *csr;
	i915_reg_t dc5_reg, dc6_reg = {};

	if (!HAS_CSR(dev_priv))
		return -ENODEV;

	csr = &dev_priv->csr;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	/* Without a loaded payload, only the base registers are printed. */
	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	/* Select the DC-counter registers for this platform generation. */
	if (INTEL_GEN(dev_priv) >= 12) {
		dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
		dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
	} else {
		dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
						 SKL_CSR_DC3_DC5_COUNT;
		if (!IS_GEN9_LP(dev_priv))
			dc6_reg = SKL_CSR_DC5_DC6_COUNT;
	}

	seq_printf(m, "DC3 -> DC5 count: %d\n", I915_READ(dc5_reg));
	if (dc6_reg.reg)
		seq_printf(m, "DC5 -> DC6 count: %d\n", I915_READ(dc6_reg));

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
2438
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002439static void intel_seq_print_mode(struct seq_file *m, int tabs,
2440 struct drm_display_mode *mode)
2441{
2442 int i;
2443
2444 for (i = 0; i < tabs; i++)
2445 seq_putc(m, '\t');
2446
Shayenne Moura4fb6bb82018-12-20 10:27:57 -02002447 seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002448}
2449
2450static void intel_encoder_info(struct seq_file *m,
2451 struct intel_crtc *intel_crtc,
2452 struct intel_encoder *intel_encoder)
2453{
David Weinehall36cdd012016-08-22 13:59:31 +03002454 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2455 struct drm_device *dev = &dev_priv->drm;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002456 struct drm_crtc *crtc = &intel_crtc->base;
2457 struct intel_connector *intel_connector;
2458 struct drm_encoder *encoder;
2459
2460 encoder = &intel_encoder->base;
2461 seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
Jani Nikula8e329a032014-06-03 14:56:21 +03002462 encoder->base.id, encoder->name);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002463 for_each_connector_on_encoder(dev, encoder, intel_connector) {
2464 struct drm_connector *connector = &intel_connector->base;
2465 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2466 connector->base.id,
Jani Nikulac23cc412014-06-03 14:56:17 +03002467 connector->name,
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002468 drm_get_connector_status_name(connector->status));
2469 if (connector->status == connector_status_connected) {
2470 struct drm_display_mode *mode = &crtc->mode;
2471 seq_printf(m, ", mode:\n");
2472 intel_seq_print_mode(m, 2, mode);
2473 } else {
2474 seq_putc(m, '\n');
2475 }
2476 }
2477}
2478
2479static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2480{
David Weinehall36cdd012016-08-22 13:59:31 +03002481 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2482 struct drm_device *dev = &dev_priv->drm;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002483 struct drm_crtc *crtc = &intel_crtc->base;
2484 struct intel_encoder *intel_encoder;
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02002485 struct drm_plane_state *plane_state = crtc->primary->state;
2486 struct drm_framebuffer *fb = plane_state->fb;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002487
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02002488 if (fb)
Matt Roper5aa8a932014-06-16 10:12:55 -07002489 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
Maarten Lankhorst23a48d52015-09-10 16:07:57 +02002490 fb->base.id, plane_state->src_x >> 16,
2491 plane_state->src_y >> 16, fb->width, fb->height);
Matt Roper5aa8a932014-06-16 10:12:55 -07002492 else
2493 seq_puts(m, "\tprimary plane disabled\n");
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002494 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2495 intel_encoder_info(m, intel_crtc, intel_encoder);
2496}
2497
2498static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2499{
2500 struct drm_display_mode *mode = panel->fixed_mode;
2501
2502 seq_printf(m, "\tfixed mode:\n");
2503 intel_seq_print_mode(m, 2, mode);
2504}
2505
Anshuman Guptaaed74502019-07-19 11:25:13 +05302506static void intel_hdcp_info(struct seq_file *m,
2507 struct intel_connector *intel_connector)
2508{
2509 bool hdcp_cap, hdcp2_cap;
2510
2511 hdcp_cap = intel_hdcp_capable(intel_connector);
2512 hdcp2_cap = intel_hdcp2_capable(intel_connector);
2513
2514 if (hdcp_cap)
2515 seq_puts(m, "HDCP1.4 ");
2516 if (hdcp2_cap)
2517 seq_puts(m, "HDCP2.2 ");
2518
2519 if (!hdcp_cap && !hdcp2_cap)
2520 seq_puts(m, "None");
2521
2522 seq_puts(m, "\n");
2523}
2524
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002525static void intel_dp_info(struct seq_file *m,
2526 struct intel_connector *intel_connector)
2527{
2528 struct intel_encoder *intel_encoder = intel_connector->encoder;
2529 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2530
2531 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
Jani Nikula742f4912015-09-03 11:16:09 +03002532 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
Maarten Lankhorstb6dabe32016-06-20 15:57:37 +02002533 if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002534 intel_panel_info(m, &intel_connector->panel);
Mika Kahola80209e52016-09-09 14:10:57 +03002535
2536 drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2537 &intel_dp->aux);
Anshuman Guptaaed74502019-07-19 11:25:13 +05302538 if (intel_connector->hdcp.shim) {
2539 seq_puts(m, "\tHDCP version: ");
2540 intel_hdcp_info(m, intel_connector);
2541 }
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002542}
2543
Libin Yang9a148a92016-11-28 20:07:05 +08002544static void intel_dp_mst_info(struct seq_file *m,
2545 struct intel_connector *intel_connector)
2546{
2547 struct intel_encoder *intel_encoder = intel_connector->encoder;
2548 struct intel_dp_mst_encoder *intel_mst =
2549 enc_to_mst(&intel_encoder->base);
2550 struct intel_digital_port *intel_dig_port = intel_mst->primary;
2551 struct intel_dp *intel_dp = &intel_dig_port->dp;
2552 bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2553 intel_connector->port);
2554
2555 seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2556}
2557
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002558static void intel_hdmi_info(struct seq_file *m,
2559 struct intel_connector *intel_connector)
2560{
2561 struct intel_encoder *intel_encoder = intel_connector->encoder;
2562 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2563
Jani Nikula742f4912015-09-03 11:16:09 +03002564 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
Anshuman Guptaaed74502019-07-19 11:25:13 +05302565 if (intel_connector->hdcp.shim) {
2566 seq_puts(m, "\tHDCP version: ");
2567 intel_hdcp_info(m, intel_connector);
2568 }
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002569}
2570
2571static void intel_lvds_info(struct seq_file *m,
2572 struct intel_connector *intel_connector)
2573{
2574 intel_panel_info(m, &intel_connector->panel);
2575}
2576
2577static void intel_connector_info(struct seq_file *m,
2578 struct drm_connector *connector)
2579{
2580 struct intel_connector *intel_connector = to_intel_connector(connector);
2581 struct intel_encoder *intel_encoder = intel_connector->encoder;
Jesse Barnesf103fc72014-02-20 12:39:57 -08002582 struct drm_display_mode *mode;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002583
2584 seq_printf(m, "connector %d: type %s, status: %s\n",
Jani Nikulac23cc412014-06-03 14:56:17 +03002585 connector->base.id, connector->name,
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002586 drm_get_connector_status_name(connector->status));
José Roberto de Souza3e037f92018-10-30 14:57:46 -07002587
2588 if (connector->status == connector_status_disconnected)
2589 return;
2590
José Roberto de Souza3e037f92018-10-30 14:57:46 -07002591 seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2592 connector->display_info.width_mm,
2593 connector->display_info.height_mm);
2594 seq_printf(m, "\tsubpixel order: %s\n",
2595 drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2596 seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
Maarten Lankhorstee648a72016-06-21 12:00:38 +02002597
Maarten Lankhorst77d1f612017-06-26 10:33:49 +02002598 if (!intel_encoder)
Maarten Lankhorstee648a72016-06-21 12:00:38 +02002599 return;
2600
2601 switch (connector->connector_type) {
2602 case DRM_MODE_CONNECTOR_DisplayPort:
2603 case DRM_MODE_CONNECTOR_eDP:
Libin Yang9a148a92016-11-28 20:07:05 +08002604 if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
2605 intel_dp_mst_info(m, intel_connector);
2606 else
2607 intel_dp_info(m, intel_connector);
Maarten Lankhorstee648a72016-06-21 12:00:38 +02002608 break;
2609 case DRM_MODE_CONNECTOR_LVDS:
2610 if (intel_encoder->type == INTEL_OUTPUT_LVDS)
Dave Airlie36cd7442014-05-02 13:44:18 +10002611 intel_lvds_info(m, intel_connector);
Maarten Lankhorstee648a72016-06-21 12:00:38 +02002612 break;
2613 case DRM_MODE_CONNECTOR_HDMIA:
2614 if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
Ville Syrjälä7e732ca2017-10-27 22:31:24 +03002615 intel_encoder->type == INTEL_OUTPUT_DDI)
Maarten Lankhorstee648a72016-06-21 12:00:38 +02002616 intel_hdmi_info(m, intel_connector);
2617 break;
2618 default:
2619 break;
Dave Airlie36cd7442014-05-02 13:44:18 +10002620 }
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002621
Jesse Barnesf103fc72014-02-20 12:39:57 -08002622 seq_printf(m, "\tmodes:\n");
2623 list_for_each_entry(mode, &connector->modes, head)
2624 intel_seq_print_mode(m, 2, mode);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002625}
2626
Robert Fekete3abc4e02015-10-27 16:58:32 +01002627static const char *plane_type(enum drm_plane_type type)
2628{
2629 switch (type) {
2630 case DRM_PLANE_TYPE_OVERLAY:
2631 return "OVL";
2632 case DRM_PLANE_TYPE_PRIMARY:
2633 return "PRI";
2634 case DRM_PLANE_TYPE_CURSOR:
2635 return "CUR";
2636 /*
2637 * Deliberately omitting default: to generate compiler warnings
2638 * when a new drm_plane_type gets added.
2639 */
2640 }
2641
2642 return "unknown";
2643}
2644
Jani Nikula5852a152019-01-07 16:51:49 +02002645static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
Robert Fekete3abc4e02015-10-27 16:58:32 +01002646{
Robert Fekete3abc4e02015-10-27 16:58:32 +01002647 /*
Robert Fossc2c446a2017-05-19 16:50:17 -04002648 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
Robert Fekete3abc4e02015-10-27 16:58:32 +01002649 * will print them all to visualize if the values are misused
2650 */
Jani Nikula5852a152019-01-07 16:51:49 +02002651 snprintf(buf, bufsize,
Robert Fekete3abc4e02015-10-27 16:58:32 +01002652 "%s%s%s%s%s%s(0x%08x)",
Robert Fossc2c446a2017-05-19 16:50:17 -04002653 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
2654 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
2655 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
2656 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
2657 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
2658 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
Robert Fekete3abc4e02015-10-27 16:58:32 +01002659 rotation);
Robert Fekete3abc4e02015-10-27 16:58:32 +01002660}
2661
2662static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2663{
David Weinehall36cdd012016-08-22 13:59:31 +03002664 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2665 struct drm_device *dev = &dev_priv->drm;
Robert Fekete3abc4e02015-10-27 16:58:32 +01002666 struct intel_plane *intel_plane;
2667
2668 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2669 struct drm_plane_state *state;
2670 struct drm_plane *plane = &intel_plane->base;
Eric Engestromb3c11ac2016-11-12 01:12:56 +00002671 struct drm_format_name_buf format_name;
Jani Nikula5852a152019-01-07 16:51:49 +02002672 char rot_str[48];
Robert Fekete3abc4e02015-10-27 16:58:32 +01002673
2674 if (!plane->state) {
2675 seq_puts(m, "plane->state is NULL!\n");
2676 continue;
2677 }
2678
2679 state = plane->state;
2680
Eric Engestrom90844f02016-08-15 01:02:38 +01002681 if (state->fb) {
Ville Syrjälä438b74a2016-12-14 23:32:55 +02002682 drm_get_format_name(state->fb->format->format,
2683 &format_name);
Eric Engestrom90844f02016-08-15 01:02:38 +01002684 } else {
Eric Engestromb3c11ac2016-11-12 01:12:56 +00002685 sprintf(format_name.str, "N/A");
Eric Engestrom90844f02016-08-15 01:02:38 +01002686 }
2687
Jani Nikula5852a152019-01-07 16:51:49 +02002688 plane_rotation(rot_str, sizeof(rot_str), state->rotation);
2689
Robert Fekete3abc4e02015-10-27 16:58:32 +01002690 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
2691 plane->base.id,
2692 plane_type(intel_plane->base.type),
2693 state->crtc_x, state->crtc_y,
2694 state->crtc_w, state->crtc_h,
2695 (state->src_x >> 16),
2696 ((state->src_x & 0xffff) * 15625) >> 10,
2697 (state->src_y >> 16),
2698 ((state->src_y & 0xffff) * 15625) >> 10,
2699 (state->src_w >> 16),
2700 ((state->src_w & 0xffff) * 15625) >> 10,
2701 (state->src_h >> 16),
2702 ((state->src_h & 0xffff) * 15625) >> 10,
Eric Engestromb3c11ac2016-11-12 01:12:56 +00002703 format_name.str,
Jani Nikula5852a152019-01-07 16:51:49 +02002704 rot_str);
Robert Fekete3abc4e02015-10-27 16:58:32 +01002705 }
2706}
2707
2708static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2709{
2710 struct intel_crtc_state *pipe_config;
2711 int num_scalers = intel_crtc->num_scalers;
2712 int i;
2713
2714 pipe_config = to_intel_crtc_state(intel_crtc->base.state);
2715
2716 /* Not all platformas have a scaler */
2717 if (num_scalers) {
2718 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
2719 num_scalers,
2720 pipe_config->scaler_state.scaler_users,
2721 pipe_config->scaler_state.scaler_id);
2722
A.Sunil Kamath58415912016-11-20 23:20:26 +05302723 for (i = 0; i < num_scalers; i++) {
Robert Fekete3abc4e02015-10-27 16:58:32 +01002724 struct intel_scaler *sc =
2725 &pipe_config->scaler_state.scalers[i];
2726
2727 seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
2728 i, yesno(sc->in_use), sc->mode);
2729 }
2730 seq_puts(m, "\n");
2731 } else {
2732 seq_puts(m, "\tNo scalers available on this platform\n");
2733 }
2734}
2735
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002736static int i915_display_info(struct seq_file *m, void *unused)
2737{
David Weinehall36cdd012016-08-22 13:59:31 +03002738 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2739 struct drm_device *dev = &dev_priv->drm;
Chris Wilson065f2ec2014-03-12 09:13:13 +00002740 struct intel_crtc *crtc;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002741 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01002742 struct drm_connector_list_iter conn_iter;
Chris Wilsona0371212019-01-14 14:21:14 +00002743 intel_wakeref_t wakeref;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002744
Daniele Ceraolo Spuriod858d562019-06-13 16:21:54 -07002745 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
Chris Wilsona0371212019-01-14 14:21:14 +00002746
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002747 seq_printf(m, "CRTC info\n");
2748 seq_printf(m, "---------\n");
Damien Lespiaud3fcc802014-05-13 23:32:22 +01002749 for_each_intel_crtc(dev, crtc) {
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02002750 struct intel_crtc_state *pipe_config;
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002751
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01002752 drm_modeset_lock(&crtc->base.mutex, NULL);
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02002753 pipe_config = to_intel_crtc_state(crtc->base.state);
2754
Robert Fekete3abc4e02015-10-27 16:58:32 +01002755 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
Chris Wilson065f2ec2014-03-12 09:13:13 +00002756 crtc->base.base.id, pipe_name(crtc->pipe),
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02002757 yesno(pipe_config->base.active),
Robert Fekete3abc4e02015-10-27 16:58:32 +01002758 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
2759 yesno(pipe_config->dither), pipe_config->pipe_bpp);
2760
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02002761 if (pipe_config->base.active) {
Ville Syrjäläcd5dcbf2017-03-27 21:55:35 +03002762 struct intel_plane *cursor =
2763 to_intel_plane(crtc->base.cursor);
2764
Chris Wilson065f2ec2014-03-12 09:13:13 +00002765 intel_crtc_info(m, crtc);
2766
Ville Syrjäläcd5dcbf2017-03-27 21:55:35 +03002767 seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
2768 yesno(cursor->base.state->visible),
2769 cursor->base.state->crtc_x,
2770 cursor->base.state->crtc_y,
2771 cursor->base.state->crtc_w,
2772 cursor->base.state->crtc_h,
2773 cursor->cursor.base);
Robert Fekete3abc4e02015-10-27 16:58:32 +01002774 intel_scaler_info(m, crtc);
2775 intel_plane_info(m, crtc);
Paulo Zanonia23dc652014-04-01 14:55:11 -03002776 }
Daniel Vettercace8412014-05-22 17:56:31 +02002777
2778 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
2779 yesno(!crtc->cpu_fifo_underrun_disabled),
2780 yesno(!crtc->pch_fifo_underrun_disabled));
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01002781 drm_modeset_unlock(&crtc->base.mutex);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002782 }
2783
2784 seq_printf(m, "\n");
2785 seq_printf(m, "Connector info\n");
2786 seq_printf(m, "--------------\n");
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01002787 mutex_lock(&dev->mode_config.mutex);
2788 drm_connector_list_iter_begin(dev, &conn_iter);
2789 drm_for_each_connector_iter(connector, &conn_iter)
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002790 intel_connector_info(m, connector);
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01002791 drm_connector_list_iter_end(&conn_iter);
2792 mutex_unlock(&dev->mode_config.mutex);
2793
Daniele Ceraolo Spuriod858d562019-06-13 16:21:54 -07002794 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
Jesse Barnes53f5e3c2014-02-07 12:48:15 -08002795
2796 return 0;
2797}
2798
Chris Wilson1b365952016-10-04 21:11:31 +01002799static int i915_engine_info(struct seq_file *m, void *unused)
2800{
2801 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2802 struct intel_engine_cs *engine;
Chris Wilsona0371212019-01-14 14:21:14 +00002803 intel_wakeref_t wakeref;
Chris Wilsonf636edb2017-10-09 12:02:57 +01002804 struct drm_printer p;
Chris Wilson1b365952016-10-04 21:11:31 +01002805
Daniele Ceraolo Spuriod858d562019-06-13 16:21:54 -07002806 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
Chris Wilson9c870d02016-10-24 13:42:15 +01002807
Chris Wilson79ffac852019-04-24 21:07:17 +01002808 seq_printf(m, "GT awake? %s [%d]\n",
2809 yesno(dev_priv->gt.awake),
2810 atomic_read(&dev_priv->gt.wakeref.count));
Lionel Landwerlinf577a032017-11-13 23:34:53 +00002811 seq_printf(m, "CS timestamp frequency: %u kHz\n",
Jani Nikula02584042018-12-31 16:56:41 +02002812 RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
Chris Wilsonf73b5672017-03-02 15:03:56 +00002813
Chris Wilsonf636edb2017-10-09 12:02:57 +01002814 p = drm_seq_file_printer(m);
Chris Wilson750e76b2019-08-06 13:43:00 +01002815 for_each_uabi_engine(engine, dev_priv)
Chris Wilson0db18b12017-12-08 01:23:00 +00002816 intel_engine_dump(engine, &p, "%s\n", engine->name);
Chris Wilson1b365952016-10-04 21:11:31 +01002817
Daniele Ceraolo Spuriod858d562019-06-13 16:21:54 -07002818 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
Chris Wilson9c870d02016-10-24 13:42:15 +01002819
Chris Wilson1b365952016-10-04 21:11:31 +01002820 return 0;
2821}
2822
Lionel Landwerlin79e9cd52018-03-06 12:28:54 +00002823static int i915_rcs_topology(struct seq_file *m, void *unused)
2824{
2825 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2826 struct drm_printer p = drm_seq_file_printer(m);
2827
Jani Nikula02584042018-12-31 16:56:41 +02002828 intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
Lionel Landwerlin79e9cd52018-03-06 12:28:54 +00002829
2830 return 0;
2831}
2832
Chris Wilsonc5418a82017-10-13 21:26:19 +01002833static int i915_shrinker_info(struct seq_file *m, void *unused)
2834{
2835 struct drm_i915_private *i915 = node_to_i915(m->private);
2836
2837 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
2838 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
2839
2840 return 0;
2841}
2842
Daniel Vetter728e29d2014-06-25 22:01:53 +03002843static int i915_shared_dplls_info(struct seq_file *m, void *unused)
2844{
David Weinehall36cdd012016-08-22 13:59:31 +03002845 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2846 struct drm_device *dev = &dev_priv->drm;
Daniel Vetter728e29d2014-06-25 22:01:53 +03002847 int i;
2848
2849 drm_modeset_lock_all(dev);
2850 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
2851 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
2852
Lucas De Marchi72f775f2018-03-20 15:06:34 -07002853 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
Lucas De Marchi0823eb92018-03-20 15:06:35 -07002854 pll->info->id);
Maarten Lankhorst2dd66ebd2016-03-14 09:27:52 +01002855 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
Ander Conselvan de Oliveira2c42e532016-12-29 17:22:09 +02002856 pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
Daniel Vetter728e29d2014-06-25 22:01:53 +03002857 seq_printf(m, " tracked hardware state:\n");
Ander Conselvan de Oliveira2c42e532016-12-29 17:22:09 +02002858 seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
Ander Conselvan de Oliveira3e369b72014-10-29 11:32:32 +02002859 seq_printf(m, " dpll_md: 0x%08x\n",
Ander Conselvan de Oliveira2c42e532016-12-29 17:22:09 +02002860 pll->state.hw_state.dpll_md);
2861 seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
2862 seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
2863 seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
Paulo Zanonic27e9172018-04-27 16:14:36 -07002864 seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
2865 seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
2866 seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
2867 pll->state.hw_state.mg_refclkin_ctl);
2868 seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
2869 pll->state.hw_state.mg_clktop2_coreclkctl1);
2870 seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
2871 pll->state.hw_state.mg_clktop2_hsclkctl);
2872 seq_printf(m, " mg_pll_div0: 0x%08x\n",
2873 pll->state.hw_state.mg_pll_div0);
2874 seq_printf(m, " mg_pll_div1: 0x%08x\n",
2875 pll->state.hw_state.mg_pll_div1);
2876 seq_printf(m, " mg_pll_lf: 0x%08x\n",
2877 pll->state.hw_state.mg_pll_lf);
2878 seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
2879 pll->state.hw_state.mg_pll_frac_lock);
2880 seq_printf(m, " mg_pll_ssc: 0x%08x\n",
2881 pll->state.hw_state.mg_pll_ssc);
2882 seq_printf(m, " mg_pll_bias: 0x%08x\n",
2883 pll->state.hw_state.mg_pll_bias);
2884 seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
2885 pll->state.hw_state.mg_pll_tdc_coldst_bias);
Daniel Vetter728e29d2014-06-25 22:01:53 +03002886 }
2887 drm_modeset_unlock_all(dev);
2888
2889 return 0;
2890}
2891
Damien Lespiau1ed1ef92014-08-30 16:50:59 +01002892static int i915_wa_registers(struct seq_file *m, void *unused)
Arun Siluvery888b5992014-08-26 14:44:51 +01002893{
Tvrtko Ursulin452420d2018-12-03 13:33:57 +00002894 struct drm_i915_private *i915 = node_to_i915(m->private);
Chris Wilson4a54da32019-07-03 14:58:04 +01002895 struct intel_engine_cs *engine;
Arun Siluvery888b5992014-08-26 14:44:51 +01002896
Chris Wilson750e76b2019-08-06 13:43:00 +01002897 for_each_uabi_engine(engine, i915) {
Chris Wilson4a54da32019-07-03 14:58:04 +01002898 const struct i915_wa_list *wal = &engine->ctx_wa_list;
2899 const struct i915_wa *wa;
2900 unsigned int count;
2901
2902 count = wal->count;
2903 if (!count)
2904 continue;
2905
2906 seq_printf(m, "%s: Workarounds applied: %u\n",
2907 engine->name, count);
2908
2909 for (wa = wal->list; count--; wa++)
2910 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
2911 i915_mmio_reg_offset(wa->reg),
2912 wa->val, wa->mask);
2913
2914 seq_printf(m, "\n");
2915 }
Arun Siluvery888b5992014-08-26 14:44:51 +01002916
2917 return 0;
2918}
2919
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05302920static int i915_ipc_status_show(struct seq_file *m, void *data)
2921{
2922 struct drm_i915_private *dev_priv = m->private;
2923
2924 seq_printf(m, "Isochronous Priority Control: %s\n",
2925 yesno(dev_priv->ipc_enabled));
2926 return 0;
2927}
2928
2929static int i915_ipc_status_open(struct inode *inode, struct file *file)
2930{
2931 struct drm_i915_private *dev_priv = inode->i_private;
2932
2933 if (!HAS_IPC(dev_priv))
2934 return -ENODEV;
2935
2936 return single_open(file, i915_ipc_status_show, dev_priv);
2937}
2938
2939static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
2940 size_t len, loff_t *offp)
2941{
2942 struct seq_file *m = file->private_data;
2943 struct drm_i915_private *dev_priv = m->private;
Chris Wilsona0371212019-01-14 14:21:14 +00002944 intel_wakeref_t wakeref;
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05302945 bool enable;
Chris Wilsond4225a52019-01-14 14:21:23 +00002946 int ret;
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05302947
2948 ret = kstrtobool_from_user(ubuf, len, &enable);
2949 if (ret < 0)
2950 return ret;
2951
Daniele Ceraolo Spurioc447ff72019-06-13 16:21:55 -07002952 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
Chris Wilsond4225a52019-01-14 14:21:23 +00002953 if (!dev_priv->ipc_enabled && enable)
2954 DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
2955 dev_priv->wm.distrust_bios_wm = true;
2956 dev_priv->ipc_enabled = enable;
2957 intel_enable_ipc(dev_priv);
2958 }
Kumar, Maheshd2d4f392017-08-17 19:15:29 +05302959
2960 return len;
2961}
2962
2963static const struct file_operations i915_ipc_status_fops = {
2964 .owner = THIS_MODULE,
2965 .open = i915_ipc_status_open,
2966 .read = seq_read,
2967 .llseek = seq_lseek,
2968 .release = single_release,
2969 .write = i915_ipc_status_write
2970};
2971
Damien Lespiauc5511e42014-11-04 17:06:51 +00002972static int i915_ddb_info(struct seq_file *m, void *unused)
2973{
David Weinehall36cdd012016-08-22 13:59:31 +03002974 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2975 struct drm_device *dev = &dev_priv->drm;
Damien Lespiauc5511e42014-11-04 17:06:51 +00002976 struct skl_ddb_entry *entry;
Ville Syrjäläff43bc32018-11-27 18:59:00 +02002977 struct intel_crtc *crtc;
Damien Lespiauc5511e42014-11-04 17:06:51 +00002978
David Weinehall36cdd012016-08-22 13:59:31 +03002979 if (INTEL_GEN(dev_priv) < 9)
Michal Wajdeczkoab309a62017-12-15 14:36:35 +00002980 return -ENODEV;
Damien Lespiau2fcffe12014-12-03 17:33:24 +00002981
Damien Lespiauc5511e42014-11-04 17:06:51 +00002982 drm_modeset_lock_all(dev);
2983
Damien Lespiauc5511e42014-11-04 17:06:51 +00002984 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
2985
Ville Syrjäläff43bc32018-11-27 18:59:00 +02002986 for_each_intel_crtc(&dev_priv->drm, crtc) {
2987 struct intel_crtc_state *crtc_state =
2988 to_intel_crtc_state(crtc->base.state);
2989 enum pipe pipe = crtc->pipe;
2990 enum plane_id plane_id;
2991
Damien Lespiauc5511e42014-11-04 17:06:51 +00002992 seq_printf(m, "Pipe %c\n", pipe_name(pipe));
2993
Ville Syrjäläff43bc32018-11-27 18:59:00 +02002994 for_each_plane_id_on_crtc(crtc, plane_id) {
2995 entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
2996 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
Damien Lespiauc5511e42014-11-04 17:06:51 +00002997 entry->start, entry->end,
2998 skl_ddb_entry_size(entry));
2999 }
3000
Ville Syrjäläff43bc32018-11-27 18:59:00 +02003001 entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
Damien Lespiauc5511e42014-11-04 17:06:51 +00003002 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
3003 entry->end, skl_ddb_entry_size(entry));
3004 }
3005
3006 drm_modeset_unlock_all(dev);
3007
3008 return 0;
3009}
3010
Vandana Kannana54746e2015-03-03 20:53:10 +05303011static void drrs_status_per_crtc(struct seq_file *m,
David Weinehall36cdd012016-08-22 13:59:31 +03003012 struct drm_device *dev,
3013 struct intel_crtc *intel_crtc)
Vandana Kannana54746e2015-03-03 20:53:10 +05303014{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003015 struct drm_i915_private *dev_priv = to_i915(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303016 struct i915_drrs *drrs = &dev_priv->drrs;
3017 int vrefresh = 0;
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003018 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003019 struct drm_connector_list_iter conn_iter;
Vandana Kannana54746e2015-03-03 20:53:10 +05303020
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003021 drm_connector_list_iter_begin(dev, &conn_iter);
3022 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003023 if (connector->state->crtc != &intel_crtc->base)
3024 continue;
3025
3026 seq_printf(m, "%s:\n", connector->name);
Vandana Kannana54746e2015-03-03 20:53:10 +05303027 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003028 drm_connector_list_iter_end(&conn_iter);
Vandana Kannana54746e2015-03-03 20:53:10 +05303029
3030 if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
3031 seq_puts(m, "\tVBT: DRRS_type: Static");
3032 else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
3033 seq_puts(m, "\tVBT: DRRS_type: Seamless");
3034 else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
3035 seq_puts(m, "\tVBT: DRRS_type: None");
3036 else
3037 seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
3038
3039 seq_puts(m, "\n\n");
3040
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003041 if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
Vandana Kannana54746e2015-03-03 20:53:10 +05303042 struct intel_panel *panel;
3043
3044 mutex_lock(&drrs->mutex);
3045 /* DRRS Supported */
3046 seq_puts(m, "\tDRRS Supported: Yes\n");
3047
3048 /* disable_drrs() will make drrs->dp NULL */
3049 if (!drrs->dp) {
C, Ramalingamce6e2132017-11-20 09:53:47 +05303050 seq_puts(m, "Idleness DRRS: Disabled\n");
3051 if (dev_priv->psr.enabled)
3052 seq_puts(m,
3053 "\tAs PSR is enabled, DRRS is not enabled\n");
Vandana Kannana54746e2015-03-03 20:53:10 +05303054 mutex_unlock(&drrs->mutex);
3055 return;
3056 }
3057
3058 panel = &drrs->dp->attached_connector->panel;
3059 seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
3060 drrs->busy_frontbuffer_bits);
3061
3062 seq_puts(m, "\n\t\t");
3063 if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
3064 seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
3065 vrefresh = panel->fixed_mode->vrefresh;
3066 } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
3067 seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
3068 vrefresh = panel->downclock_mode->vrefresh;
3069 } else {
3070 seq_printf(m, "DRRS_State: Unknown(%d)\n",
3071 drrs->refresh_rate_type);
3072 mutex_unlock(&drrs->mutex);
3073 return;
3074 }
3075 seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3076
3077 seq_puts(m, "\n\t\t");
3078 mutex_unlock(&drrs->mutex);
3079 } else {
3080 /* DRRS not supported. Print the VBT parameter*/
3081 seq_puts(m, "\tDRRS Supported : No");
3082 }
3083 seq_puts(m, "\n");
3084}
3085
3086static int i915_drrs_status(struct seq_file *m, void *unused)
3087{
David Weinehall36cdd012016-08-22 13:59:31 +03003088 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3089 struct drm_device *dev = &dev_priv->drm;
Vandana Kannana54746e2015-03-03 20:53:10 +05303090 struct intel_crtc *intel_crtc;
3091 int active_crtc_cnt = 0;
3092
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003093 drm_modeset_lock_all(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303094 for_each_intel_crtc(dev, intel_crtc) {
Maarten Lankhorstf77076c2015-06-01 12:50:08 +02003095 if (intel_crtc->base.state->active) {
Vandana Kannana54746e2015-03-03 20:53:10 +05303096 active_crtc_cnt++;
3097 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
3098
3099 drrs_status_per_crtc(m, dev, intel_crtc);
3100 }
Vandana Kannana54746e2015-03-03 20:53:10 +05303101 }
Maarten Lankhorst26875fe2016-06-20 15:57:36 +02003102 drm_modeset_unlock_all(dev);
Vandana Kannana54746e2015-03-03 20:53:10 +05303103
3104 if (!active_crtc_cnt)
3105 seq_puts(m, "No active crtc found\n");
3106
3107 return 0;
3108}
3109
/*
 * Dump the DisplayPort MST topology for every DP source port that is
 * MST-capable. For each non-MST-encoder DP connector, the attached
 * digital port is checked for MST capability and, if present, its
 * topology manager state is written to the seq_file.
 *
 * Returns 0 (seq_file show callbacks report errors via the stream).
 */
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *intel_encoder;
	struct intel_digital_port *intel_dig_port;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		/* Only native DP connectors can be MST sources. */
		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/*
		 * Skip fake MST stream encoders; we want the real source
		 * port, not the per-stream connector it fans out to.
		 */
		intel_encoder = intel_attached_encoder(connector);
		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		if (!intel_dig_port->dp.can_mst)
			continue;

		seq_printf(m, "MST Source Port %c\n",
			   port_name(intel_dig_port->base.port));
		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
3140
/*
 * debugfs write handler arming/disarming DP compliance test mode.
 *
 * The user-supplied buffer is parsed as a base-10 integer; a value of
 * exactly 1 arms compliance testing on every connected DP connector
 * (excluding MST stream encoders), anything else disarms it.
 *
 * Returns the number of bytes consumed, 0 for an empty write, or a
 * negative errno (from memdup_user_nul() or kstrtoint()).
 */
static ssize_t i915_displayport_test_active_write(struct file *file,
						  const char __user *ubuf,
						  size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	if (len == 0)
		return 0;

	/* Copy and NUL-terminate the user buffer in one step. */
	input_buffer = memdup_user_nul(ubuf, len);
	if (IS_ERR(input_buffer))
		return PTR_ERR(input_buffer);

	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* Skip fake MST stream encoders. */
		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				break;
			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance.test_active = 1;
			else
				intel_dp->compliance.test_active = 0;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}
3199
3200static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3201{
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003202 struct drm_i915_private *dev_priv = m->private;
3203 struct drm_device *dev = &dev_priv->drm;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003204 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003205 struct drm_connector_list_iter conn_iter;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003206 struct intel_dp *intel_dp;
3207
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003208 drm_connector_list_iter_begin(dev, &conn_iter);
3209 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003210 struct intel_encoder *encoder;
3211
Todd Previteeb3394fa2015-04-18 00:04:19 -07003212 if (connector->connector_type !=
3213 DRM_MODE_CONNECTOR_DisplayPort)
3214 continue;
3215
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003216 encoder = to_intel_encoder(connector->encoder);
3217 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3218 continue;
3219
3220 if (encoder && connector->status == connector_status_connected) {
3221 intel_dp = enc_to_intel_dp(&encoder->base);
Manasi Navarec1617ab2016-12-09 16:22:50 -08003222 if (intel_dp->compliance.test_active)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003223 seq_puts(m, "1");
3224 else
3225 seq_puts(m, "0");
3226 } else
3227 seq_puts(m, "0");
3228 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003229 drm_connector_list_iter_end(&conn_iter);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003230
3231 return 0;
3232}
3233
3234static int i915_displayport_test_active_open(struct inode *inode,
David Weinehall36cdd012016-08-22 13:59:31 +03003235 struct file *file)
Todd Previteeb3394fa2015-04-18 00:04:19 -07003236{
David Weinehall36cdd012016-08-22 13:59:31 +03003237 return single_open(file, i915_displayport_test_active_show,
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003238 inode->i_private);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003239}
3240
/*
 * File ops for the dp_test_active debugfs node: seq_file-backed reads
 * plus a custom write handler to arm/disarm compliance testing.
 */
static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};
3249
/*
 * Dump the DP compliance test payload per connected DP connector:
 * the EDID checksum for EDID-read tests, or the requested video
 * timings/bpc for video-pattern tests. Unconnected connectors print "0".
 */
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* Skip fake MST stream encoders. */
		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			if (intel_dp->compliance.test_type ==
			    DP_TEST_LINK_EDID_READ)
				seq_printf(m, "%lx",
					   intel_dp->compliance.test_data.edid);
			else if (intel_dp->compliance.test_type ==
				 DP_TEST_LINK_VIDEO_PATTERN) {
				seq_printf(m, "hdisplay: %d\n",
					   intel_dp->compliance.test_data.hdisplay);
				seq_printf(m, "vdisplay: %d\n",
					   intel_dp->compliance.test_data.vdisplay);
				seq_printf(m, "bpc: %u\n",
					   intel_dp->compliance.test_data.bpc);
			}
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003293
3294static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3295{
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003296 struct drm_i915_private *dev_priv = m->private;
3297 struct drm_device *dev = &dev_priv->drm;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003298 struct drm_connector *connector;
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003299 struct drm_connector_list_iter conn_iter;
Todd Previteeb3394fa2015-04-18 00:04:19 -07003300 struct intel_dp *intel_dp;
3301
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003302 drm_connector_list_iter_begin(dev, &conn_iter);
3303 drm_for_each_connector_iter(connector, &conn_iter) {
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003304 struct intel_encoder *encoder;
3305
Todd Previteeb3394fa2015-04-18 00:04:19 -07003306 if (connector->connector_type !=
3307 DRM_MODE_CONNECTOR_DisplayPort)
3308 continue;
3309
Maarten Lankhorsta874b6a2017-06-26 10:18:35 +02003310 encoder = to_intel_encoder(connector->encoder);
3311 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3312 continue;
3313
3314 if (encoder && connector->status == connector_status_connected) {
3315 intel_dp = enc_to_intel_dp(&encoder->base);
Manasi Navarec1617ab2016-12-09 16:22:50 -08003316 seq_printf(m, "%02lx", intel_dp->compliance.test_type);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003317 } else
3318 seq_puts(m, "0");
3319 }
Daniel Vetter3f6a5e12017-03-01 10:52:21 +01003320 drm_connector_list_iter_end(&conn_iter);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003321
3322 return 0;
3323}
Andy Shevchenkoe4006712018-03-16 16:12:13 +02003324DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
Todd Previteeb3394fa2015-04-18 00:04:19 -07003325
/*
 * Print a watermark latency table, one "WMn <raw> (<usec>)" line per
 * level. The number of levels and the unit of the raw values are
 * platform dependent, so the raw value is scaled to tenths of a
 * microsecond before display.
 */
static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	int level;
	int num_levels;

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	/* Hold modeset locks so the table can't change under us. */
	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9/vlv/chv
		 */
		if (INTEL_GEN(dev_priv) >= 9 ||
		    IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv) ||
		    IS_G4X(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}
3365
3366static int pri_wm_latency_show(struct seq_file *m, void *data)
3367{
David Weinehall36cdd012016-08-22 13:59:31 +03003368 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003369 const u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003370
David Weinehall36cdd012016-08-22 13:59:31 +03003371 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003372 latencies = dev_priv->wm.skl_latency;
3373 else
David Weinehall36cdd012016-08-22 13:59:31 +03003374 latencies = dev_priv->wm.pri_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003375
3376 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003377
3378 return 0;
3379}
3380
3381static int spr_wm_latency_show(struct seq_file *m, void *data)
3382{
David Weinehall36cdd012016-08-22 13:59:31 +03003383 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003384 const u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003385
David Weinehall36cdd012016-08-22 13:59:31 +03003386 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003387 latencies = dev_priv->wm.skl_latency;
3388 else
David Weinehall36cdd012016-08-22 13:59:31 +03003389 latencies = dev_priv->wm.spr_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003390
3391 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003392
3393 return 0;
3394}
3395
3396static int cur_wm_latency_show(struct seq_file *m, void *data)
3397{
David Weinehall36cdd012016-08-22 13:59:31 +03003398 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003399 const u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003400
David Weinehall36cdd012016-08-22 13:59:31 +03003401 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003402 latencies = dev_priv->wm.skl_latency;
3403 else
David Weinehall36cdd012016-08-22 13:59:31 +03003404 latencies = dev_priv->wm.cur_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003405
3406 wm_latency_show(m, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003407
3408 return 0;
3409}
3410
3411static int pri_wm_latency_open(struct inode *inode, struct file *file)
3412{
David Weinehall36cdd012016-08-22 13:59:31 +03003413 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003414
Ville Syrjälä04548cb2017-04-21 21:14:29 +03003415 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003416 return -ENODEV;
3417
David Weinehall36cdd012016-08-22 13:59:31 +03003418 return single_open(file, pri_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003419}
3420
3421static int spr_wm_latency_open(struct inode *inode, struct file *file)
3422{
David Weinehall36cdd012016-08-22 13:59:31 +03003423 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003424
Rodrigo Vivib2ae3182019-02-04 14:25:38 -08003425 if (HAS_GMCH(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003426 return -ENODEV;
3427
David Weinehall36cdd012016-08-22 13:59:31 +03003428 return single_open(file, spr_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003429}
3430
3431static int cur_wm_latency_open(struct inode *inode, struct file *file)
3432{
David Weinehall36cdd012016-08-22 13:59:31 +03003433 struct drm_i915_private *dev_priv = inode->i_private;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003434
Rodrigo Vivib2ae3182019-02-04 14:25:38 -08003435 if (HAS_GMCH(dev_priv))
Ville Syrjälä369a1342014-01-22 14:36:08 +02003436 return -ENODEV;
3437
David Weinehall36cdd012016-08-22 13:59:31 +03003438 return single_open(file, cur_wm_latency_show, dev_priv);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003439}
3440
/*
 * Parse up to 8 space-separated u16 latency values from userspace and
 * store them into @wm under the modeset locks. The write is rejected
 * (-EINVAL) unless the user supplies exactly as many values as the
 * platform has watermark levels.
 *
 * Returns the number of bytes consumed or a negative errno.
 */
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, u16 wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	u16 new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	/* Must mirror the level count used by wm_latency_show(). */
	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	/* Leave room for the terminating NUL. */
	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -EINVAL;

	/* Update the live table only while holding the modeset locks. */
	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
3485
3486
3487static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3488 size_t len, loff_t *offp)
3489{
3490 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003491 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003492 u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003493
David Weinehall36cdd012016-08-22 13:59:31 +03003494 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003495 latencies = dev_priv->wm.skl_latency;
3496 else
David Weinehall36cdd012016-08-22 13:59:31 +03003497 latencies = dev_priv->wm.pri_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003498
3499 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003500}
3501
3502static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3503 size_t len, loff_t *offp)
3504{
3505 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003506 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003507 u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003508
David Weinehall36cdd012016-08-22 13:59:31 +03003509 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003510 latencies = dev_priv->wm.skl_latency;
3511 else
David Weinehall36cdd012016-08-22 13:59:31 +03003512 latencies = dev_priv->wm.spr_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003513
3514 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003515}
3516
3517static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3518 size_t len, loff_t *offp)
3519{
3520 struct seq_file *m = file->private_data;
David Weinehall36cdd012016-08-22 13:59:31 +03003521 struct drm_i915_private *dev_priv = m->private;
Jani Nikulae5315212019-01-16 11:15:23 +02003522 u16 *latencies;
Ville Syrjälä369a1342014-01-22 14:36:08 +02003523
David Weinehall36cdd012016-08-22 13:59:31 +03003524 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau97e94b22014-11-04 17:06:50 +00003525 latencies = dev_priv->wm.skl_latency;
3526 else
David Weinehall36cdd012016-08-22 13:59:31 +03003527 latencies = dev_priv->wm.cur_latency;
Damien Lespiau97e94b22014-11-04 17:06:50 +00003528
3529 return wm_latency_write(file, ubuf, len, offp, latencies);
Ville Syrjälä369a1342014-01-22 14:36:08 +02003530}
3531
/* debugfs ops for the primary-plane WM latency table (read/write). */
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};
3540
/* debugfs ops for the sprite-plane WM latency table (read/write). */
static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};
3549
/* debugfs ops for the cursor-plane WM latency table (read/write). */
static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
3558
/*
 * i915_wedged debugfs attribute.
 *
 * Reading reports whether the GT is terminally wedged (1) or healthy
 * (0); any other state from intel_gt_terminally_wedged() (e.g. a reset
 * in progress) is propagated as an errno.
 */
static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_i915_private *i915 = data;
	int ret = intel_gt_terminally_wedged(&i915->gt);

	switch (ret) {
	case -EIO:
		/* Terminally wedged. */
		*val = 1;
		return 0;
	case 0:
		*val = 0;
		return 0;
	default:
		return ret;
	}
}

/*
 * Writing an engine mask manually declares those engines hung and
 * triggers error capture + reset handling.
 */
static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/* Flush any previous reset before applying for a new one */
	wait_event(i915->gt.reset.queue,
		   !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags));

	intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE,
			      "Manually set wedged engine mask = %llx", val);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
Chris Wilsonf3cd4742009-10-13 22:20:20 +01003594
/* Bit flags accepted by the i915_gem_drop_caches debugfs interface. */
#define DROP_UNBOUND	BIT(0)
#define DROP_BOUND	BIT(1)
#define DROP_RETIRE	BIT(2)
#define DROP_ACTIVE	BIT(3)
#define DROP_FREED	BIT(4)
#define DROP_SHRINK_ALL	BIT(5)
#define DROP_IDLE	BIT(6)
#define DROP_RESET_ACTIVE	BIT(7)
#define DROP_RESET_SEQNO	BIT(8)
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED	| \
		  DROP_SHRINK_ALL |\
		  DROP_IDLE	| \
		  DROP_RESET_ACTIVE | \
		  DROP_RESET_SEQNO)

/* Reading the attribute reports the full mask of supported flags. */
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}
3620
/*
 * Drop the GEM caches selected by @val (a mask of DROP_* flags):
 * optionally wedge stuck engines, wait for idle, retire requests,
 * shrink bound/unbound/all objects, flush the idle workers and drain
 * freed objects. Used by tests to force the driver into a known state.
 *
 * Returns 0 on success or a negative errno from lock/wait failures.
 */
static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);

	/* If the engines refuse to idle in time, declare the GT wedged. */
	if (val & DROP_RESET_ACTIVE &&
	    wait_for(intel_engines_are_idle(&i915->gt),
		     I915_IDLE_ENGINES_TIMEOUT))
		intel_gt_set_wedged(&i915->gt);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	if (val & (DROP_ACTIVE | DROP_IDLE | DROP_RETIRE | DROP_RESET_SEQNO)) {
		int ret;

		ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
		if (ret)
			return ret;

		/*
		 * To finish the flush of the idle_worker, we must complete
		 * the switch-to-kernel-context, which requires a double
		 * pass through wait_for_idle: first queues the switch,
		 * second waits for the switch.
		 */
		if (ret == 0 && val & (DROP_IDLE | DROP_ACTIVE))
			ret = i915_gem_wait_for_idle(i915,
						     I915_WAIT_INTERRUPTIBLE |
						     I915_WAIT_LOCKED,
						     MAX_SCHEDULE_TIMEOUT);

		if (ret == 0 && val & DROP_IDLE)
			ret = i915_gem_wait_for_idle(i915,
						     I915_WAIT_INTERRUPTIBLE |
						     I915_WAIT_LOCKED,
						     MAX_SCHEDULE_TIMEOUT);

		if (val & DROP_RETIRE)
			i915_retire_requests(i915);

		mutex_unlock(&i915->drm.struct_mutex);

		/* GT power-management idling must happen outside the lock. */
		if (ret == 0 && val & DROP_IDLE)
			ret = intel_gt_pm_wait_for_idle(&i915->gt);
	}

	/* Recover from a manual wedge requested above, if possible. */
	if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(&i915->gt))
		intel_gt_handle_error(&i915->gt, ALL_ENGINES, 0, NULL);

	/* Mark the shrinker region for lockdep while reclaiming. */
	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(i915);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_IDLE) {
		flush_delayed_work(&i915->gem.retire_work);
		flush_work(&i915->gem.idle_work);
	}

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(i915);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
/*
 * i915_cache_sharing debugfs attribute (gen6/gen7 only).
 *
 * Reads the uncore cache-sharing policy field out of the
 * GEN6_MBCUNIT_SNPCR register; writes accept a policy value of 0-3
 * and perform a read-modify-write of the same field while holding a
 * runtime-pm wakeref.
 */
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;
	u32 snpcr = 0;

	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
		return -ENODEV;

	/* Keep the device awake for the register read. */
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}

static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;

	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
		return -ENODEV;

	/* Only policy values 0-3 fit the SNPCR field. */
	if (val > 3)
		return -EINVAL;

	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		u32 snpcr;

		/* Update the cache sharing policy here as well */
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
		snpcr &= ~GEN6_MBC_SNPCR_MASK;
		snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
		I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
	}

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
Jesse Barnes07b7ddd2011-08-03 11:28:44 -07003746
/*
 * Decode CHV power-gating signal registers into an sseu_dev_info:
 * CHV has a single slice with up to two subslices; each subslice
 * contributes up to 8 EUs (four 2-EU groups, each with its own
 * power-gate enable bit).
 */
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
#define SS_MAX 2
	const int ss_max = SS_MAX;
	u32 sig1[SS_MAX], sig2[SS_MAX];
	int ss;

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] |= BIT(ss);
		/* Each cleared PG-enable bit means a pair of EUs is powered. */
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
#undef SS_MAX
}
3779
/*
 * Fill in @sseu with the slice/subslice/EU configuration currently powered
 * up on gen10+, derived from the per-slice power-gating control ACK
 * registers.
 */
static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		/*
		 * FIXME: Valid SS Mask respects the spec and read
		 * only valid bits for those registers, excluding reserved
		 * although this seems wrong because it would leave many
		 * subslices without ACK.
		 */
		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
	}

	/* ACK bit layout for the even (SSA) and odd (SSB) subslice halves */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		/* Subslice layout is taken from the static device info */
		sseu->subslice_mask[s] = info->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			/* each set ACK bit accounts for a pair of EUs */
			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
3835
/*
 * Fill in @sseu with the slice/subslice/EU configuration currently powered
 * up on gen9, derived from the power-gating control ACK registers.
 *
 * On gen9 big-core (BC) parts the subslice mask comes from the static
 * device info; on gen9 low-power (LP) parts it is reconstructed from the
 * per-subslice ACK bits.
 */
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	/* ACK bit layout for the even (SSA) and odd (SSB) subslice halves */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		if (IS_GEN9_BC(dev_priv))
			sseu->subslice_mask[s] =
				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask[s] |= BIT(ss);
			}

			/* each set ACK bit accounts for a pair of EUs */
			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
3891
David Weinehall36cdd012016-08-22 13:59:31 +03003892static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
Imre Deak915490d2016-08-31 19:13:01 +03003893 struct sseu_dev_info *sseu)
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02003894{
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02003895 u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
David Weinehall36cdd012016-08-22 13:59:31 +03003896 int s;
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02003897
Imre Deakf08a0c92016-08-31 19:13:04 +03003898 sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02003899
Imre Deakf08a0c92016-08-31 19:13:04 +03003900 if (sseu->slice_mask) {
Jani Nikulaa10f3612019-05-29 11:21:50 +03003901 sseu->eu_per_subslice =
3902 RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
3903 for (s = 0; s < fls(sseu->slice_mask); s++) {
3904 sseu->subslice_mask[s] =
3905 RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
3906 }
Imre Deak57ec1712016-08-31 19:13:05 +03003907 sseu->eu_total = sseu->eu_per_subslice *
Stuart Summers0040fd12019-05-24 08:40:21 -07003908 intel_sseu_subslice_total(sseu);
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02003909
3910 /* subtract fused off EU(s) from enabled slice(s) */
Imre Deak795b38b2016-08-31 19:13:07 +03003911 for (s = 0; s < fls(sseu->slice_mask); s++) {
Jani Nikulaa10f3612019-05-29 11:21:50 +03003912 u8 subslice_7eu =
3913 RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02003914
Imre Deak915490d2016-08-31 19:13:01 +03003915 sseu->eu_total -= hweight8(subslice_7eu);
Łukasz Daniluk91bedd32015-09-25 11:54:58 +02003916 }
3917 }
3918}
3919
/*
 * Print one sseu_dev_info block to the debugfs seq_file.
 *
 * @is_available_info selects the label: true prints the "Available"
 * (static device) figures and additionally the capability flags; false
 * prints only the "Enabled" (runtime) figures.
 */
static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
				 const struct sseu_dev_info *sseu)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const char *type = is_available_info ? "Available" : "Enabled";
	int s;

	seq_printf(m, " %s Slice Mask: %04x\n", type,
		   sseu->slice_mask);
	seq_printf(m, " %s Slice Total: %u\n", type,
		   hweight8(sseu->slice_mask));
	seq_printf(m, " %s Subslice Total: %u\n", type,
		   intel_sseu_subslice_total(sseu));
	for (s = 0; s < fls(sseu->slice_mask); s++) {
		seq_printf(m, " %s Slice%i subslices: %u\n", type,
			   s, intel_sseu_subslices_per_slice(sseu, s));
	}
	seq_printf(m, " %s EU Total: %u\n", type,
		   sseu->eu_total);
	seq_printf(m, " %s EU Per Subslice: %u\n", type,
		   sseu->eu_per_subslice);

	/* Capability flags are static properties; only meaningful for the
	 * "Available" dump. */
	if (!is_available_info)
		return;

	seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
	if (HAS_POOLED_EU(dev_priv))
		seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);

	seq_printf(m, " Has Slice Power Gating: %s\n",
		   yesno(sseu->has_slice_pg));
	seq_printf(m, " Has Subslice Power Gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	seq_printf(m, " Has EU Power Gating: %s\n",
		   yesno(sseu->has_eu_pg));
}
3956
/*
 * debugfs i915_sseu_status: dump both the static ("Available") SSEU
 * configuration and the runtime ("Enabled") state probed from hardware.
 *
 * Only supported on gen8+; the hardware probe is done under a runtime-PM
 * wakeref since it reads device registers.
 */
static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct sseu_dev_info sseu;
	intel_wakeref_t wakeref;

	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));
	/* Copy the static limits; the status helpers fill in the rest */
	sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
	sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
	sseu.max_eus_per_subslice =
		RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_sseu_device_status(dev_priv, &sseu);
		else if (IS_BROADWELL(dev_priv))
			broadwell_sseu_device_status(dev_priv, &sseu);
		else if (IS_GEN(dev_priv, 9))
			gen9_sseu_device_status(dev_priv, &sseu);
		else if (INTEL_GEN(dev_priv) >= 10)
			gen10_sseu_device_status(dev_priv, &sseu);
	}

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}
3991
/*
 * Open handler for the i915_forcewake_user debugfs file: grab a runtime-PM
 * wakeref and user forcewake for as long as the file is held open.
 *
 * The wakeref is stashed in file->private_data so that
 * i915_forcewake_release() can drop the same reference.
 */
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	/* forcewake only exists on gen6+; older parts need nothing */
	if (INTEL_GEN(i915) < 6)
		return 0;

	file->private_data =
		(void *)(uintptr_t)intel_runtime_pm_get(&i915->runtime_pm);
	intel_uncore_forcewake_user_get(&i915->uncore);

	return 0;
}
4005
/*
 * Release handler for i915_forcewake_user: drop the user forcewake and the
 * runtime-PM wakeref that i915_forcewake_open() stashed in private_data.
 */
static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	/* nothing was taken at open time on pre-gen6 */
	if (INTEL_GEN(i915) < 6)
		return 0;

	intel_uncore_forcewake_user_put(&i915->uncore);
	intel_runtime_pm_put(&i915->runtime_pm,
			     (intel_wakeref_t)(uintptr_t)file->private_data);

	return 0;
}
4019
/* File operations for the i915_forcewake_user debugfs node: holding the
 * file open keeps forcewake (and a runtime-PM wakeref) asserted. */
static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
4025
/*
 * Show handler for i915_hpd_storm_ctl: report the current HPD storm
 * threshold and whether a storm is currently being handled (detected by a
 * pending reenable_work).
 */
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;

	/* Synchronize with everything first in case there's been an HPD
	 * storm, but we haven't finished handling it in the kernel yet
	 */
	intel_synchronize_irq(dev_priv);
	flush_work(&dev_priv->hotplug.dig_port_work);
	flush_delayed_work(&dev_priv->hotplug.hotplug_work);

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   yesno(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}
4044
4045static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4046 const char __user *ubuf, size_t len,
4047 loff_t *offp)
4048{
4049 struct seq_file *m = file->private_data;
4050 struct drm_i915_private *dev_priv = m->private;
4051 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4052 unsigned int new_threshold;
4053 int i;
4054 char *newline;
4055 char tmp[16];
4056
4057 if (len >= sizeof(tmp))
4058 return -EINVAL;
4059
4060 if (copy_from_user(tmp, ubuf, len))
4061 return -EFAULT;
4062
4063 tmp[len] = '\0';
4064
4065 /* Strip newline, if any */
4066 newline = strchr(tmp, '\n');
4067 if (newline)
4068 *newline = '\0';
4069
4070 if (strcmp(tmp, "reset") == 0)
4071 new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4072 else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4073 return -EINVAL;
4074
4075 if (new_threshold > 0)
4076 DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4077 new_threshold);
4078 else
4079 DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4080
4081 spin_lock_irq(&dev_priv->irq_lock);
4082 hotplug->hpd_storm_threshold = new_threshold;
4083 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4084 for_each_hpd_pin(i)
4085 hotplug->stats[i].count = 0;
4086 spin_unlock_irq(&dev_priv->irq_lock);
4087
4088 /* Re-enable hpd immediately if we were in an irq storm */
4089 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4090
4091 return len;
4092}
4093
/* Open handler wiring i915_hpd_storm_ctl_show into the seq_file API. */
static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}
4098
/* File operations for the i915_hpd_storm_ctl debugfs node. */
static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
4107
/*
 * Show handler for i915_hpd_short_storm_ctl: report whether short-pulse
 * HPD storm detection is currently enabled.
 */
static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;

	seq_printf(m, "Enabled: %s\n",
		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));

	return 0;
}
4117
/* Open handler wiring i915_hpd_short_storm_ctl_show into the seq_file API. */
static int
i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_short_storm_ctl_show,
			   inode->i_private);
}
4124
4125static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
4126 const char __user *ubuf,
4127 size_t len, loff_t *offp)
4128{
4129 struct seq_file *m = file->private_data;
4130 struct drm_i915_private *dev_priv = m->private;
4131 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4132 char *newline;
4133 char tmp[16];
4134 int i;
4135 bool new_state;
4136
4137 if (len >= sizeof(tmp))
4138 return -EINVAL;
4139
4140 if (copy_from_user(tmp, ubuf, len))
4141 return -EFAULT;
4142
4143 tmp[len] = '\0';
4144
4145 /* Strip newline, if any */
4146 newline = strchr(tmp, '\n');
4147 if (newline)
4148 *newline = '\0';
4149
4150 /* Reset to the "default" state for this system */
4151 if (strcmp(tmp, "reset") == 0)
4152 new_state = !HAS_DP_MST(dev_priv);
4153 else if (kstrtobool(tmp, &new_state) != 0)
4154 return -EINVAL;
4155
4156 DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
4157 new_state ? "En" : "Dis");
4158
4159 spin_lock_irq(&dev_priv->irq_lock);
4160 hotplug->hpd_short_storm_enabled = new_state;
4161 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4162 for_each_hpd_pin(i)
4163 hotplug->stats[i].count = 0;
4164 spin_unlock_irq(&dev_priv->irq_lock);
4165
4166 /* Re-enable hpd immediately if we were in an irq storm */
4167 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4168
4169 return len;
4170}
4171
/* File operations for the i915_hpd_short_storm_ctl debugfs node. */
static const struct file_operations i915_hpd_short_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_short_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_short_storm_ctl_write,
};
4180
/*
 * debugfs i915_drrs_ctl set callback: manually enable (@val != 0) or
 * disable (@val == 0) DRRS on every active eDP output that supports it.
 *
 * For each CRTC the per-CRTC modeset lock is taken, any pending commit is
 * waited on (hw_done), and then every connector bound to that CRTC is
 * checked; only eDP encoders are toggled. Returns 0 on success or a
 * negative error from lock/wait interruption.
 */
static int i915_drrs_ctl_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 7)
		return -ENODEV;

	for_each_intel_crtc(dev, crtc) {
		struct drm_connector_list_iter conn_iter;
		struct intel_crtc_state *crtc_state;
		struct drm_connector *connector;
		struct drm_crtc_commit *commit;
		int ret;

		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		/* nothing to do on inactive or non-DRRS CRTCs */
		if (!crtc_state->base.active ||
		    !crtc_state->has_drrs)
			goto out;

		/* wait for any in-flight commit to reach the hardware */
		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				goto out;
		}

		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct intel_encoder *encoder;
			struct intel_dp *intel_dp;

			/* only connectors driven by this CRTC */
			if (!(crtc_state->base.connector_mask &
			      drm_connector_mask(connector)))
				continue;

			encoder = intel_attached_encoder(connector);
			if (encoder->type != INTEL_OUTPUT_EDP)
				continue;

			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
						val ? "en" : "dis", val);

			intel_dp = enc_to_intel_dp(&encoder->base);
			if (val)
				intel_edp_drrs_enable(intel_dp,
						      crtc_state);
			else
				intel_edp_drrs_disable(intel_dp,
						       crtc_state);
		}
		drm_connector_list_iter_end(&conn_iter);

out:
		/* drop the per-CRTC lock before moving on (or bailing out) */
		drm_modeset_unlock(&crtc->base.mutex);
		if (ret)
			return ret;
	}

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4250
/*
 * Write handler for i915_fifo_underrun_reset: writing a truthy value
 * re-arms FIFO underrun reporting on every active CRTC (it is disabled
 * after the first underrun to avoid log spam) and resets the FBC underrun
 * tracking.
 *
 * Each CRTC's modeset lock is taken and any pending commit is waited on
 * (hw_done, then flip_done) before re-arming. Returns @cnt on success or
 * a negative error.
 */
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	/* a falsy write is accepted but does nothing */
	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		commit = crtc_state->base.commit;
		if (commit) {
			/* let any in-flight commit finish first */
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->base.active) {
			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
				      pipe_name(intel_crtc->pipe));

			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}

		drm_modeset_unlock(&intel_crtc->base.mutex);

		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}
4304
/* File operations for the write-only i915_fifo_underrun_reset node. */
static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};
4311
/*
 * Read-only debugfs entries, registered in bulk via
 * drm_debugfs_create_files() from i915_debugfs_register(). The optional
 * fourth field is passed through as driver-private data to the show
 * callback (used to select the load-error log for i915_guc_log_dump).
 */
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
Ben Gamari20172632009-02-17 20:08:50 -05004356
/*
 * Writable debugfs entries with dedicated file_operations; registered one
 * by one in i915_debugfs_register(). The error-state entries are compiled
 * in only when error capture support is enabled.
 */
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};
4384
/*
 * Register all i915 debugfs entries on the primary DRM minor: the
 * forcewake-user node, the writable i915_debugfs_files entries, and the
 * read-only i915_debugfs_list entries.
 *
 * Returns the result of drm_debugfs_create_files(); the
 * debugfs_create_file() calls report errors internally.
 */
int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	int i;

	debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
			    to_i915(minor->dev), &i915_forcewake_fops);

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		debugfs_create_file(i915_debugfs_files[i].name,
				    S_IRUGO | S_IWUSR,
				    minor->debugfs_root,
				    to_i915(minor->dev),
				    i915_debugfs_files[i].fops);
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}
4405
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};

/* DPCD register ranges dumped by the per-connector i915_dpcd debugfs file */
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
4429
/*
 * Show handler for the per-connector i915_dpcd debugfs file: read each
 * block listed in i915_dpcd_debug over the DP AUX channel and hex-dump it.
 *
 * eDP-only blocks are skipped on non-eDP connectors; read failures are
 * reported inline per block rather than aborting the dump.
 */
static int i915_dpcd_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	u8 buf[16];
	ssize_t err;
	int i;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
		const struct dpcd_block *b = &i915_dpcd_debug[i];
		/* .end takes precedence over .size; default is one byte */
		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);

		if (b->edp &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		/* low tech for now */
		if (WARN_ON(size > sizeof(buf)))
			continue;

		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
		if (err < 0)
			seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
		else
			seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004464
/*
 * Show handler for the per-connector i915_panel debugfs file: dump the
 * eDP panel power sequencing and backlight delays for a connected panel.
 */
static int i915_panel_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	seq_printf(m, "Panel power up delay: %d\n",
		   intel_dp->panel_power_up_delay);
	seq_printf(m, "Panel power down delay: %d\n",
		   intel_dp->panel_power_down_delay);
	seq_printf(m, "Backlight on delay: %d\n",
		   intel_dp->backlight_on_delay);
	seq_printf(m, "Backlight off delay: %d\n",
		   intel_dp->backlight_off_delay);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_panel);
David Weinehallecbd6782016-08-23 12:23:56 +03004486
Ramalingam Cbdc93fe2018-10-23 14:52:29 +05304487static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
4488{
4489 struct drm_connector *connector = m->private;
4490 struct intel_connector *intel_connector = to_intel_connector(connector);
4491
4492 if (connector->status != connector_status_connected)
4493 return -ENODEV;
4494
4495 /* HDCP is supported by connector */
Ramalingam Cd3dacc72018-10-29 15:15:46 +05304496 if (!intel_connector->hdcp.shim)
Ramalingam Cbdc93fe2018-10-23 14:52:29 +05304497 return -EINVAL;
4498
4499 seq_printf(m, "%s:%d HDCP version: ", connector->name,
4500 connector->base.id);
Anshuman Guptaaed74502019-07-19 11:25:13 +05304501 intel_hdcp_info(m, intel_connector);
Ramalingam Cbdc93fe2018-10-23 14:52:29 +05304502
4503 return 0;
4504}
4505DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
4506
Manasi Navaree845f092018-12-05 16:54:07 -08004507static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
4508{
4509 struct drm_connector *connector = m->private;
4510 struct drm_device *dev = connector->dev;
4511 struct drm_crtc *crtc;
4512 struct intel_dp *intel_dp;
4513 struct drm_modeset_acquire_ctx ctx;
4514 struct intel_crtc_state *crtc_state = NULL;
4515 int ret = 0;
4516 bool try_again = false;
4517
4518 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
4519
4520 do {
Manasi Navare6afe8922018-12-19 15:51:20 -08004521 try_again = false;
Manasi Navaree845f092018-12-05 16:54:07 -08004522 ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
4523 &ctx);
4524 if (ret) {
Chris Wilsonee6df562019-03-29 16:51:52 +00004525 if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
4526 try_again = true;
4527 continue;
4528 }
Manasi Navaree845f092018-12-05 16:54:07 -08004529 break;
4530 }
4531 crtc = connector->state->crtc;
4532 if (connector->status != connector_status_connected || !crtc) {
4533 ret = -ENODEV;
4534 break;
4535 }
4536 ret = drm_modeset_lock(&crtc->mutex, &ctx);
4537 if (ret == -EDEADLK) {
4538 ret = drm_modeset_backoff(&ctx);
4539 if (!ret) {
4540 try_again = true;
4541 continue;
4542 }
4543 break;
4544 } else if (ret) {
4545 break;
4546 }
4547 intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4548 crtc_state = to_intel_crtc_state(crtc->state);
4549 seq_printf(m, "DSC_Enabled: %s\n",
4550 yesno(crtc_state->dsc_params.compression_enable));
Radhakrishna Sripadafed85692019-01-09 13:14:14 -08004551 seq_printf(m, "DSC_Sink_Support: %s\n",
4552 yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
Manasi Navarefeb88462019-04-05 15:48:21 -07004553 seq_printf(m, "Force_DSC_Enable: %s\n",
4554 yesno(intel_dp->force_dsc_en));
Manasi Navaree845f092018-12-05 16:54:07 -08004555 if (!intel_dp_is_edp(intel_dp))
4556 seq_printf(m, "FEC_Sink_Support: %s\n",
4557 yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
4558 } while (try_again);
4559
4560 drm_modeset_drop_locks(&ctx);
4561 drm_modeset_acquire_fini(&ctx);
4562
4563 return ret;
4564}
4565
4566static ssize_t i915_dsc_fec_support_write(struct file *file,
4567 const char __user *ubuf,
4568 size_t len, loff_t *offp)
4569{
4570 bool dsc_enable = false;
4571 int ret;
4572 struct drm_connector *connector =
4573 ((struct seq_file *)file->private_data)->private;
4574 struct intel_encoder *encoder = intel_attached_encoder(connector);
4575 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4576
4577 if (len == 0)
4578 return 0;
4579
4580 DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
4581 len);
4582
4583 ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
4584 if (ret < 0)
4585 return ret;
4586
4587 DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
4588 (dsc_enable) ? "true" : "false");
4589 intel_dp->force_dsc_en = dsc_enable;
4590
4591 *offp += len;
4592 return len;
4593}
4594
4595static int i915_dsc_fec_support_open(struct inode *inode,
4596 struct file *file)
4597{
4598 return single_open(file, i915_dsc_fec_support_show,
4599 inode->i_private);
4600}
4601
/*
 * Read/write debugfs attribute: reads dump DSC/FEC state via
 * i915_dsc_fec_support_show (seq_file single_open), writes toggle
 * intel_dp->force_dsc_en via i915_dsc_fec_support_write.
 */
static const struct file_operations i915_dsc_fec_support_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_fec_support_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_fec_support_write
};
4610
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004611/**
4612 * i915_debugfs_connector_add - add i915 specific connector debugfs files
4613 * @connector: pointer to a registered drm_connector
4614 *
4615 * Cleanup will be done by drm_connector_unregister() through a call to
4616 * drm_debugfs_connector_remove().
4617 *
4618 * Returns 0 on success, negative error codes on error.
4619 */
4620int i915_debugfs_connector_add(struct drm_connector *connector)
4621{
4622 struct dentry *root = connector->debugfs_entry;
Manasi Navaree845f092018-12-05 16:54:07 -08004623 struct drm_i915_private *dev_priv = to_i915(connector->dev);
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004624
4625 /* The connector must have been registered beforehands. */
4626 if (!root)
4627 return -ENODEV;
4628
4629 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4630 connector->connector_type == DRM_MODE_CONNECTOR_eDP)
David Weinehallecbd6782016-08-23 12:23:56 +03004631 debugfs_create_file("i915_dpcd", S_IRUGO, root,
4632 connector, &i915_dpcd_fops);
4633
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07004634 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
David Weinehallecbd6782016-08-23 12:23:56 +03004635 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4636 connector, &i915_panel_fops);
Dhinakaran Pandiyan5b7b3082018-07-04 17:31:21 -07004637 debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
4638 connector, &i915_psr_sink_status_fops);
4639 }
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004640
Ramalingam Cbdc93fe2018-10-23 14:52:29 +05304641 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4642 connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
4643 connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
4644 debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
4645 connector, &i915_hdcp_sink_capability_fops);
4646 }
4647
Manasi Navaree845f092018-12-05 16:54:07 -08004648 if (INTEL_GEN(dev_priv) >= 10 &&
4649 (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4650 connector->connector_type == DRM_MODE_CONNECTOR_eDP))
4651 debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
4652 connector, &i915_dsc_fec_support_fops);
4653
Jani Nikulaaa7471d2015-04-01 11:15:21 +03004654 return 0;
4655}